diff --git a/.gitattributes b/.gitattributes index f5039f86821f1ac6ebae61871e4f1b9bf940e7b4..0b1df3568b47d18b3969a938ed3bff22d82a0439 100644 --- a/.gitattributes +++ b/.gitattributes @@ -101,3 +101,4 @@ lib/python3.10/site-packages/av/audio/layout.cpython-310-x86_64-linux-gnu.so fil lib/python3.10/site-packages/av/audio/plane.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text lib/python3.10/site-packages/av/audio/stream.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text lib/python3.10/site-packages/av/audio/codeccontext.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +lib/python3.10/site-packages/av/audio/fifo.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/lib/python3.10/site-packages/av/audio/fifo.cpython-310-x86_64-linux-gnu.so b/lib/python3.10/site-packages/av/audio/fifo.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..16247b525e7cbc1596bb014c686ea136b1c6d8db --- /dev/null +++ b/lib/python3.10/site-packages/av/audio/fifo.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39e9bb1e44c14e5091b8a3e2f21841ca942edc677dc7ad5050dc6d98fdb15108 +size 634401 diff --git a/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/INSTALLER b/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..5c69047b2eb8235994febeeae1da4a82365a240a --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/INSTALLER @@ -0,0 +1 @@ +uv \ No newline at end of file diff --git a/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/METADATA b/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..b4ced65633f7c8ef9bf38d0c26707ff1514fc7fe 
--- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/METADATA @@ -0,0 +1,55 @@ +Metadata-Version: 2.3 +Name: jsonschema-specifications +Version: 2024.10.1 +Summary: The JSON Schema meta-schemas and vocabularies, exposed as a Registry +Project-URL: Documentation, https://jsonschema-specifications.readthedocs.io/ +Project-URL: Homepage, https://github.com/python-jsonschema/jsonschema-specifications +Project-URL: Issues, https://github.com/python-jsonschema/jsonschema-specifications/issues/ +Project-URL: Funding, https://github.com/sponsors/Julian +Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-jsonschema-specifications?utm_source=pypi-jsonschema-specifications&utm_medium=referral&utm_campaign=pypi-link +Project-URL: Source, https://github.com/python-jsonschema/jsonschema-specifications +Author-email: Julian Berman +License-File: COPYING +Keywords: data validation,json,json schema,jsonschema,validation +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: File Formats :: JSON +Classifier: Topic :: File Formats :: JSON :: JSON Schema +Requires-Python: >=3.9 +Requires-Dist: referencing>=0.31.0 +Description-Content-Type: text/x-rst + +============================= +``jsonschema-specifications`` +============================= + +|PyPI| |Pythons| |CI| 
|ReadTheDocs| + +JSON support files from the `JSON Schema Specifications `_ (metaschemas, vocabularies, etc.), packaged for runtime access from Python as a `referencing-based Schema Registry `_. + +.. |PyPI| image:: https://img.shields.io/pypi/v/jsonschema-specifications.svg + :alt: PyPI version + :target: https://pypi.org/project/jsonschema-specifications/ + +.. |Pythons| image:: https://img.shields.io/pypi/pyversions/jsonschema-specifications.svg + :alt: Supported Python versions + :target: https://pypi.org/project/jsonschema-specifications/ + +.. |CI| image:: https://github.com/python-jsonschema/jsonschema-specifications/workflows/CI/badge.svg + :alt: Build status + :target: https://github.com/python-jsonschema/jsonschema-specifications/actions?query=workflow%3ACI + +.. |ReadTheDocs| image:: https://readthedocs.org/projects/jsonschema-specifications/badge/?version=stable&style=flat + :alt: ReadTheDocs status + :target: https://jsonschema-specifications.readthedocs.io/en/stable/ diff --git a/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/RECORD b/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..aa70cdac0eb8ad52ae70c6dcb81393cb23c6e136 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/RECORD @@ -0,0 +1,30 @@ +jsonschema_specifications-2024.10.1.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2 +jsonschema_specifications-2024.10.1.dist-info/METADATA,sha256=-jCfClPka5D4aDTtJ683zNiEcSHXhPBLuk9r9XWwyHI,2985 +jsonschema_specifications-2024.10.1.dist-info/RECORD,, +jsonschema_specifications-2024.10.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jsonschema_specifications-2024.10.1.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87 
+jsonschema_specifications-2024.10.1.dist-info/licenses/COPYING,sha256=QtzWNJX4e063x3V6-jebtVpT-Ur9el9lfZrfVyNuUVw,1057 +jsonschema_specifications/__init__.py,sha256=qoTB2DKY7qvNrGhMPH6gtmAJRLilmVQ-fFZwT6ryqw0,386 +jsonschema_specifications/_core.py,sha256=tFhc1CMleJ3AJOK_bjxOpFQTdrsUClFGfFxPBU_CebM,1140 +jsonschema_specifications/schemas/draft201909/metaschema.json,sha256=e3YbPhIfCgyh6ioLjizIVrz4AWBLgmjXG6yqICvAwTs,1785 +jsonschema_specifications/schemas/draft201909/vocabularies/applicator,sha256=aJUQDplyb7sQcFhRK77D7P1LJOj9L6zuPlBe5ysNTDE,1860 +jsonschema_specifications/schemas/draft201909/vocabularies/content,sha256=m31PVaTi_bAsQwBo_f-rxzKt3OI42j8d8mkCScM1MnQ,517 +jsonschema_specifications/schemas/draft201909/vocabularies/core,sha256=taLElX9kldClCB8ECevooU5BOayyA_x0hHH47eKvWyw,1531 +jsonschema_specifications/schemas/draft201909/vocabularies/meta-data,sha256=1H4kRd1qgicaKY2DzGxsuNSuHhXg3Fa-zTehY-zwEoY,892 +jsonschema_specifications/schemas/draft201909/vocabularies/validation,sha256=HlJsHTNac0gF_ILPV5jBK5YK19olF8Zs2lobCTWcPBw,2834 +jsonschema_specifications/schemas/draft202012/metaschema.json,sha256=Qdp29a-3zgYtJI92JGOpL3ykfk4PkFsiS6av7vkd7Q8,2452 +jsonschema_specifications/schemas/draft202012/vocabularies/applicator,sha256=xKbkFHuR_vf-ptwFjLG_k0AvdBS3ZXiosWqvHa1qrO8,1659 +jsonschema_specifications/schemas/draft202012/vocabularies/content,sha256=CDQ3R3ZOSlgUJieTz01lIFenkThjxZUNQyl-jh_axbY,519 +jsonschema_specifications/schemas/draft202012/vocabularies/core,sha256=wtEqjk3RHTNt_IOj9mOqTGnwtJs76wlP_rJbUxb0gD0,1564 +jsonschema_specifications/schemas/draft202012/vocabularies/format,sha256=UOu_55BhGoSbjMQAoJwdDg-2q1wNQ6DyIgH9NiUFa_Q,403 +jsonschema_specifications/schemas/draft202012/vocabularies/format-annotation,sha256=q8d1rf79idIjWBcNm_k_Tr0jSVY7u-3WDwK-98gSvMA,448 +jsonschema_specifications/schemas/draft202012/vocabularies/format-assertion,sha256=xSJCuaG7eGsmw-gset1CjDH5yW5XXc6Z5W6l_qptogw,445 
+jsonschema_specifications/schemas/draft202012/vocabularies/meta-data,sha256=j3bW4U9Bubku-TO3CM3FFEyLUmhlGtEZGEhfsXVPHHY,892 +jsonschema_specifications/schemas/draft202012/vocabularies/unevaluated,sha256=Lb-8tzmUtnCwl2SSre4f_7RsIWgnhNL1pMpWH54tDLQ,506 +jsonschema_specifications/schemas/draft202012/vocabularies/validation,sha256=cBCjHlQfMtK-ch4t40jfdcmzaHaj7TBId_wKvaHTelg,2834 +jsonschema_specifications/schemas/draft3/metaschema.json,sha256=LPdfZENvtb43Si6qJ6uLfh_WUcm0ba6mxnsC_WTiRYs,2600 +jsonschema_specifications/schemas/draft4/metaschema.json,sha256=4UidC0dV8CeTMCWR0_y48Htok6gqlPJIlfjk7fEbguI,4357 +jsonschema_specifications/schemas/draft6/metaschema.json,sha256=wp386fVINcOgbAOzxdXsDtp3cGVo-cTffPvHVmpRAG0,4437 +jsonschema_specifications/schemas/draft7/metaschema.json,sha256=PVOSCIJhYGxVm2A_OFMpyfGrRbXWZ-uZBodFOwVdQF4,4819 +jsonschema_specifications/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +jsonschema_specifications/tests/test_jsonschema_specifications.py,sha256=WkbYRW6A6FoZ0rivShfqVLSCsAiHJ2x8TxqECJTXPTY,1106 diff --git a/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/REQUESTED b/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/WHEEL b/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..cdd68a497cdfa8d3f2b837225beacef711b85047 --- /dev/null +++ b/lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.25.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudapy/recursion_usecases.py 
b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/recursion_usecases.py new file mode 100644 index 0000000000000000000000000000000000000000..b182359b11a25b60218ff4b0962d0158cae05e33 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/recursion_usecases.py @@ -0,0 +1,100 @@ +""" +Usecases of recursive functions in the CUDA target, many derived from +numba/tests/recursion_usecases.py. + +Some functions are compiled at import time, hence a separate module. +""" + +from numba import cuda + + +@cuda.jit("i8(i8)", device=True) +def fib1(n): + if n < 2: + return n + # Note the second call does not use a named argument, unlike the CPU target + # usecase + return fib1(n - 1) + fib1(n - 2) + + +def make_fib2(): + @cuda.jit("i8(i8)", device=True) + def fib2(n): + if n < 2: + return n + return fib2(n - 1) + fib2(n - 2) + + return fib2 + + +fib2 = make_fib2() + + +@cuda.jit +def type_change_self(x, y): + if x > 1 and y > 0: + return x + type_change_self(x - y, y) + else: + return y + + +# Implicit signature +@cuda.jit(device=True) +def fib3(n): + if n < 2: + return n + + return fib3(n - 1) + fib3(n - 2) + + +# Run-away self recursion +@cuda.jit(device=True) +def runaway_self(x): + return runaway_self(x) + + +@cuda.jit(device=True) +def raise_self(x): + if x == 1: + raise ValueError("raise_self") + elif x > 0: + return raise_self(x - 1) + else: + return 1 + + +@cuda.jit(debug=True, opt=False) +def raise_self_kernel(x): + raise_self(x) + + +def make_optional_return_case(jit=lambda x: x): + @jit + def foo(x): + if x > 5: + return x - 1 + else: + return + + @jit + def bar(x): + out = foo(x) + if out is None: + return out + elif out < 8: + return out + else: + return x * bar(out) + + return bar + + +def make_growing_tuple_case(jit=lambda x: x): + # From issue #4387 + @jit + def make_list(n): + if n <= 0: + return None + + return (n, make_list(n - 1)) + return make_list diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_boolean.py 
b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_boolean.py new file mode 100644 index 0000000000000000000000000000000000000000..fc0568233a806961f86a5b6c1cb64441e5916b2e --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_boolean.py @@ -0,0 +1,24 @@ +import numpy as np +from numba.cuda.testing import unittest, CUDATestCase +from numba import cuda + + +def boolean_func(A, vertial): + if vertial: + A[0] = 123 + else: + A[0] = 321 + + +class TestCudaBoolean(CUDATestCase): + def test_boolean(self): + func = cuda.jit('void(float64[:], bool_)')(boolean_func) + A = np.array([0], dtype='float64') + func[1, 1](A, True) + self.assertTrue(A[0] == 123) + func[1, 1](A, False) + self.assertTrue(A[0] == 321) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_cffi.py b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_cffi.py new file mode 100644 index 0000000000000000000000000000000000000000..ee09fcc3129bdd8629b286f54772c593e94a3cb6 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_cffi.py @@ -0,0 +1,33 @@ +import numpy as np + +from numba import cuda, types +from numba.cuda.testing import (skip_on_cudasim, test_data_dir, unittest, + CUDATestCase) +from numba.tests.support import skip_unless_cffi + + +@skip_unless_cffi +@skip_on_cudasim('Simulator does not support linking') +class TestCFFI(CUDATestCase): + def test_from_buffer(self): + import cffi + ffi = cffi.FFI() + + link = str(test_data_dir / 'jitlink.ptx') + sig = types.void(types.CPointer(types.int32)) + array_mutator = cuda.declare_device('array_mutator', sig) + + @cuda.jit(link=[link]) + def mutate_array(x): + x_ptr = ffi.from_buffer(x) + array_mutator(x_ptr) + + x = np.arange(2).astype(np.int32) + mutate_array[1, 1](x) + + # The foreign function should have copied element 1 to element 0 + self.assertEqual(x[0], x[1]) + + +if __name__ == '__main__': + unittest.main() diff --git 
a/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_debuginfo.py b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_debuginfo.py new file mode 100644 index 0000000000000000000000000000000000000000..efe42b50ce31f2a5e18c64f77e8198eb2f8ad2f8 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_debuginfo.py @@ -0,0 +1,221 @@ +from numba.tests.support import override_config +from numba.cuda.testing import skip_on_cudasim +from numba import cuda +from numba.core import types +from numba.cuda.testing import CUDATestCase +import itertools +import re +import unittest + + +@skip_on_cudasim('Simulator does not produce debug dumps') +class TestCudaDebugInfo(CUDATestCase): + """ + These tests only checks the compiled PTX for debuginfo section + """ + + def setUp(self): + super().setUp() + # If we're using LTO then we can't check the PTX in these tests, + # because we produce LTO-IR, which is opaque to the user. + # Additionally, LTO optimizes away the exception status due to an + # oversight in the way we generate it (it is not added to the used + # list). 
+ self.skip_if_lto("Exceptions not supported with LTO") + + def _getasm(self, fn, sig): + fn.compile(sig) + return fn.inspect_asm(sig) + + def _check(self, fn, sig, expect): + asm = self._getasm(fn, sig=sig) + re_section_dbginfo = re.compile(r"\.section\s+\.debug_info\s+{") + match = re_section_dbginfo.search(asm) + assertfn = self.assertIsNotNone if expect else self.assertIsNone + assertfn(match, msg=asm) + + def test_no_debuginfo_in_asm(self): + @cuda.jit(debug=False) + def foo(x): + x[0] = 1 + + self._check(foo, sig=(types.int32[:],), expect=False) + + def test_debuginfo_in_asm(self): + @cuda.jit(debug=True, opt=False) + def foo(x): + x[0] = 1 + + self._check(foo, sig=(types.int32[:],), expect=True) + + def test_environment_override(self): + with override_config('CUDA_DEBUGINFO_DEFAULT', 1): + # Using default value + @cuda.jit(opt=False) + def foo(x): + x[0] = 1 + + self._check(foo, sig=(types.int32[:],), expect=True) + + # User override default value + @cuda.jit(debug=False) + def bar(x): + x[0] = 1 + + self._check(bar, sig=(types.int32[:],), expect=False) + + def test_issue_5835(self): + # Invalid debug metadata would segfault NVVM when any function was + # compiled with debug turned on and optimization off. This eager + # compilation should not crash anything. 
+ @cuda.jit((types.int32[::1],), debug=True, opt=False) + def f(x): + x[0] = 0 + + def test_wrapper_has_debuginfo(self): + sig = (types.int32[::1],) + + @cuda.jit(sig, debug=True, opt=0) + def f(x): + x[0] = 1 + + llvm_ir = f.inspect_llvm(sig) + + defines = [line for line in llvm_ir.splitlines() + if 'define void @"_ZN6cudapy' in line] + + # Make sure we only found one definition + self.assertEqual(len(defines), 1) + + wrapper_define = defines[0] + self.assertIn('!dbg', wrapper_define) + + def test_debug_function_calls_internal_impl(self): + # Calling a function in a module generated from an implementation + # internal to Numba requires multiple modules to be compiled with NVVM - + # the internal implementation, and the caller. This example uses two + # modules because the `in (2, 3)` is implemented with: + # + # numba::cpython::listobj::in_seq::$3clocals$3e::seq_contains_impl$242( + # UniTuple, + # int + # ) + # + # This is condensed from this reproducer in Issue 5311: + # https://github.com/numba/numba/issues/5311#issuecomment-674206587 + + @cuda.jit((types.int32[:], types.int32[:]), debug=True, opt=False) + def f(inp, outp): + outp[0] = 1 if inp[0] in (2, 3) else 3 + + def test_debug_function_calls_device_function(self): + # Calling a device function requires compilation of multiple modules + # with NVVM - one for the caller and one for the callee. This checks + # that we don't cause an NVVM error in this case. 
+ + @cuda.jit(device=True, debug=True, opt=0) + def threadid(): + return cuda.blockDim.x * cuda.blockIdx.x + cuda.threadIdx.x + + @cuda.jit((types.int32[:],), debug=True, opt=0) + def kernel(arr): + i = cuda.grid(1) + if i < len(arr): + arr[i] = threadid() + + def _test_chained_device_function(self, kernel_debug, f1_debug, f2_debug): + @cuda.jit(device=True, debug=f2_debug, opt=False) + def f2(x): + return x + 1 + + @cuda.jit(device=True, debug=f1_debug, opt=False) + def f1(x, y): + return x - f2(y) + + @cuda.jit((types.int32, types.int32), debug=kernel_debug, opt=False) + def kernel(x, y): + f1(x, y) + + kernel[1, 1](1, 2) + + def test_chained_device_function(self): + # Calling a device function that calls another device function from a + # kernel with should succeed regardless of which jit decorators have + # debug=True. See Issue #7159. + + debug_opts = itertools.product(*[(True, False)] * 3) + + for kernel_debug, f1_debug, f2_debug in debug_opts: + with self.subTest(kernel_debug=kernel_debug, + f1_debug=f1_debug, + f2_debug=f2_debug): + self._test_chained_device_function(kernel_debug, + f1_debug, + f2_debug) + + def _test_chained_device_function_two_calls(self, kernel_debug, f1_debug, + f2_debug): + + @cuda.jit(device=True, debug=f2_debug, opt=False) + def f2(x): + return x + 1 + + @cuda.jit(device=True, debug=f1_debug, opt=False) + def f1(x, y): + return x - f2(y) + + @cuda.jit(debug=kernel_debug, opt=False) + def kernel(x, y): + f1(x, y) + f2(x) + + kernel[1, 1](1, 2) + + def test_chained_device_function_two_calls(self): + # Calling a device function that calls a leaf device function from a + # kernel, and calling the leaf device function from the kernel should + # succeed, regardless of which jit decorators have debug=True. See + # Issue #7159. 
+ + debug_opts = itertools.product(*[(True, False)] * 3) + + for kernel_debug, f1_debug, f2_debug in debug_opts: + with self.subTest(kernel_debug=kernel_debug, + f1_debug=f1_debug, + f2_debug=f2_debug): + self._test_chained_device_function_two_calls(kernel_debug, + f1_debug, + f2_debug) + + def test_chained_device_three_functions(self): + # Like test_chained_device_function, but with enough functions (three) + # to ensure that the recursion visits all the way down the call tree + # when fixing linkage of functions for debug. + def three_device_fns(kernel_debug, leaf_debug): + @cuda.jit(device=True, debug=leaf_debug, opt=False) + def f3(x): + return x * x + + @cuda.jit(device=True) + def f2(x): + return f3(x) + 1 + + @cuda.jit(device=True) + def f1(x, y): + return x - f2(y) + + @cuda.jit(debug=kernel_debug, opt=False) + def kernel(x, y): + f1(x, y) + + kernel[1, 1](1, 2) + + # Check when debug on the kernel, on the leaf, and not on any function. + three_device_fns(kernel_debug=True, leaf_debug=True) + three_device_fns(kernel_debug=True, leaf_debug=False) + three_device_fns(kernel_debug=False, leaf_debug=True) + three_device_fns(kernel_debug=False, leaf_debug=False) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_exception.py b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_exception.py new file mode 100644 index 0000000000000000000000000000000000000000..8891010410c9db97439eb460142b4ae4e7724fbe --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_exception.py @@ -0,0 +1,174 @@ +import numpy as np + +from numba import cuda +from numba.cuda.testing import unittest, xfail_unless_cudasim, CUDATestCase +from numba.core import config + + +class TestException(CUDATestCase): + def setUp(self): + super().setUp() + # LTO optimizes away the exception status due to an oversight + # in the way we generate it (it is not added to the used list). 
+ self.skip_if_lto("Exceptions not supported with LTO") + + def test_exception(self): + def foo(ary): + x = cuda.threadIdx.x + if x == 2: + # NOTE: indexing with a out-of-bounds constant can fail at + # compile-time instead (because the getitem is rewritten as a + # static_getitem) + ary.shape[-x] + + unsafe_foo = cuda.jit(foo) + safe_foo = cuda.jit(debug=True, opt=False)(foo) + + if not config.ENABLE_CUDASIM: + # Simulator throws exceptions regardless of debug + # setting + unsafe_foo[1, 3](np.array([0, 1])) + + with self.assertRaises(IndexError) as cm: + safe_foo[1, 3](np.array([0, 1])) + self.assertIn("tuple index out of range", str(cm.exception)) + + def test_user_raise(self): + @cuda.jit(debug=True, opt=False) + def foo(do_raise): + if do_raise: + raise ValueError + + foo[1, 1](False) + with self.assertRaises(ValueError): + foo[1, 1](True) + + def case_raise_causing_warp_diverge(self, with_debug_mode): + """Testing issue #2655. + + Exception raising code can cause the compiler to miss location + of unifying branch target and resulting in unexpected warp + divergence. + """ + with_opt_mode = not with_debug_mode + + @cuda.jit(debug=with_debug_mode, opt=with_opt_mode) + def problematic(x, y): + tid = cuda.threadIdx.x + ntid = cuda.blockDim.x + + if tid > 12: + for i in range(ntid): + y[i] += x[i] // y[i] + + cuda.syncthreads() + if tid < 17: + for i in range(ntid): + x[i] += x[i] // y[i] + + @cuda.jit + def oracle(x, y): + tid = cuda.threadIdx.x + ntid = cuda.blockDim.x + + if tid > 12: + for i in range(ntid): + if y[i] != 0: + y[i] += x[i] // y[i] + + cuda.syncthreads() + if tid < 17: + for i in range(ntid): + if y[i] != 0: + x[i] += x[i] // y[i] + + n = 32 + got_x = 1. / (np.arange(n) + 0.01) + got_y = 1. / (np.arange(n) + 0.01) + problematic[1, n](got_x, got_y) + + expect_x = 1. / (np.arange(n) + 0.01) + expect_y = 1. 
/ (np.arange(n) + 0.01) + oracle[1, n](expect_x, expect_y) + + np.testing.assert_almost_equal(expect_x, got_x) + np.testing.assert_almost_equal(expect_y, got_y) + + def test_raise_causing_warp_diverge(self): + """Test case for issue #2655. + """ + self.case_raise_causing_warp_diverge(with_debug_mode=False) + + # The following two cases relate to Issue #7806: Division by zero stops the + # kernel. https://github.com/numba/numba/issues/7806. + + def test_no_zero_division_error(self): + # When debug is False: + # - Division by zero raises no exception + # - Execution proceeds after a divide by zero + @cuda.jit + def f(r, x, y): + r[0] = y[0] / x[0] + r[1] = y[0] + + r = np.zeros(2) + x = np.zeros(1) + y = np.ones(1) + + f[1, 1](r, x, y) + + self.assertTrue(np.isinf(r[0]), 'Expected inf from div by zero') + self.assertEqual(r[1], y[0], 'Expected execution to continue') + + def test_zero_division_error_in_debug(self): + # When debug is True: + # - Zero by division raises an exception + # - Execution halts at the point of division by zero + @cuda.jit(debug=True, opt=False) + def f(r, x, y): + r[0] = y[0] / x[0] + r[1] = y[0] + + r = np.zeros(2) + x = np.zeros(1) + y = np.ones(1) + + # Simulator and device behaviour differs slightly in the exception + # raised - in debug mode, the CUDA target uses the Python error model, + # which gives a ZeroDivision error. The simulator uses NumPy with the + # error mode for division by zero set to raise, which results in a + # FloatingPointError instead. 
+ if config.ENABLE_CUDASIM: + exc = FloatingPointError + else: + exc = ZeroDivisionError + + with self.assertRaises(exc): + f[1, 1](r, x, y) + + self.assertEqual(r[0], 0, 'Expected result to be left unset') + self.assertEqual(r[1], 0, 'Expected execution to stop') + + @xfail_unless_cudasim + def test_raise_in_device_function(self): + # This is an expected failure because reporting of exceptions raised in + # device functions does not work correctly - see Issue #8036: + # https://github.com/numba/numba/issues/8036 + msg = 'Device Function Error' + + @cuda.jit(device=True) + def f(): + raise ValueError(msg) + + @cuda.jit(debug=True) + def kernel(): + f() + + with self.assertRaises(ValueError) as raises: + kernel[1, 1]() + + self.assertIn(msg, str(raises.exception)) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_freevar.py b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_freevar.py new file mode 100644 index 0000000000000000000000000000000000000000..6b7b2d2abcc6a55558a2c7142809fa7571a9fde6 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_freevar.py @@ -0,0 +1,29 @@ +import numpy as np + +from numba import cuda +from numba.cuda.testing import unittest, CUDATestCase + + +class TestFreeVar(CUDATestCase): + def test_freevar(self): + """Make sure we can compile the following kernel with freevar reference + in arguments to shared.array + """ + from numba import float32 + + size = 1024 + nbtype = float32 + + @cuda.jit("(float32[::1], intp)") + def foo(A, i): + "Dummy function" + sdata = cuda.shared.array(size, # size is freevar + dtype=nbtype) # nbtype is freevar + A[i] = sdata[i] + + A = np.arange(2, dtype="float32") + foo[1, 1](A, 0) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_gufunc_scheduling.py b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_gufunc_scheduling.py new file 
mode 100644 index 0000000000000000000000000000000000000000..fb8de3285f75b6372945667cd33b4f48b404cec3 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_gufunc_scheduling.py @@ -0,0 +1,95 @@ +from numba.cuda.deviceufunc import GUFuncEngine +import unittest + + +def template(signature, shapes, expects): + gufb = GUFuncEngine.from_signature(signature) + sch = gufb.schedule(shapes) + for k, v in expects.items(): + got = getattr(sch, k) + if got != v: + fmt = 'error for %s: got=%s but expect=%s' + raise AssertionError(fmt % (k, got, v)) + + +class TestGUFuncScheduling(unittest.TestCase): + def test_signature_1(self): + signature = '(m, n), (n, p) -> (m, p)' + shapes = (100, 4, 5), (1, 5, 7) + expects = dict( + ishapes=[(4, 5), (5, 7)], + oshapes=[(4, 7)], + loopdims=(100,), + pinned=[False, True] + ) + template(signature, shapes, expects) + + def test_signature_2(self): + signature = '(m, n), (n, p) -> (m, p)' + shapes = (100, 4, 5), (100, 5, 7) + expects = dict( + ishapes=[(4, 5), (5, 7)], + oshapes=[(4, 7)], + loopdims=(100,), + pinned=[False, False] + ) + template(signature, shapes, expects) + + def test_signature_3(self): + signature = '(m, n), (n, p) -> (m, p)' + shapes = (12, 34, 4, 5), (12, 34, 5, 7) + expects = dict( + ishapes=[(4, 5), (5, 7)], + oshapes=[(4, 7)], + loopdims=(12, 34), + pinned=[False, False] + ) + template(signature, shapes, expects) + + def test_signature_4(self): + signature = '(m, n), (n, p) -> (m, p)' + shapes = (4, 5), (5, 7) + expects = dict( + ishapes=[(4, 5), (5, 7)], + oshapes=[(4, 7)], + loopdims=(), + pinned=[False, False] + ) + template(signature, shapes, expects) + + def test_signature_5(self): + signature = '(a), (a) -> (a)' + shapes = (5,), (5,) + expects = dict( + ishapes=[(5,), (5,)], + oshapes=[(5,)], + loopdims=(), + pinned=[False, False] + ) + template(signature, shapes, expects) + + def test_signature_6(self): + signature = '(), () -> ()' + shapes = (5,), (5,) + expects = dict( + ishapes=[(), ()], 
+ oshapes=[()], + loopdims=(5,), + pinned=[False, False] + ) + template(signature, shapes, expects) + + def test_signature_7(self): + signature = '(), () -> ()' + shapes = (5,), () + expects = dict( + ishapes=[(), ()], + oshapes=[()], + loopdims=(5,), + pinned=[False, True] + ) + template(signature, shapes, expects) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_iterators.py b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_iterators.py new file mode 100644 index 0000000000000000000000000000000000000000..47366f3803e1a76bc1ab27f5390db250071baf31 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_iterators.py @@ -0,0 +1,99 @@ +from numba import cuda +from numba.cuda.testing import unittest, CUDATestCase + +import numpy as np + + +class TestIterators(CUDATestCase): + + def test_enumerate(self): + @cuda.jit + def enumerator(x, error): + count = 0 + + for i, v in enumerate(x): + if count != i: + error[0] = 1 + if v != x[i]: + error[0] = 2 + + count += 1 + + if count != len(x): + error[0] = 3 + + x = np.asarray((10, 9, 8, 7, 6)) + error = np.zeros(1, dtype=np.int32) + + enumerator[1, 1](x, error) + self.assertEqual(error[0], 0) + + def _test_twoarg_function(self, f): + x = np.asarray((10, 9, 8, 7, 6)) + y = np.asarray((1, 2, 3, 4, 5)) + error = np.zeros(1, dtype=np.int32) + + f[1, 1](x, y, error) + self.assertEqual(error[0], 0) + + def test_zip(self): + @cuda.jit + def zipper(x, y, error): + i = 0 + + for xv, yv in zip(x, y): + if xv != x[i]: + error[0] = 1 + if yv != y[i]: + error[0] = 2 + + i += 1 + + if i != len(x): + error[0] = 3 + + self._test_twoarg_function(zipper) + + def test_enumerate_zip(self): + @cuda.jit + def enumerator_zipper(x, y, error): + count = 0 + + for i, (xv, yv) in enumerate(zip(x, y)): + if i != count: + error[0] = 1 + if xv != x[i]: + error[0] = 2 + if yv != y[i]: + error[0] = 3 + + count += 1 + + if count != len(x): + error[0] = 4 + + 
self._test_twoarg_function(enumerator_zipper) + + def test_zip_enumerate(self): + @cuda.jit + def zipper_enumerator(x, y, error): + count = 0 + + for (i, xv), yv in zip(enumerate(x), y): + if i != count: + error[0] = 1 + if xv != x[i]: + error[0] = 2 + if yv != y[i]: + error[0] = 3 + + count += 1 + + if count != len(x): + error[0] = 4 + + self._test_twoarg_function(zipper_enumerator) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_sm_creation.py b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_sm_creation.py new file mode 100644 index 0000000000000000000000000000000000000000..bff48e64288a2c28b2da0e5d06dfa09d226460fc --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_sm_creation.py @@ -0,0 +1,205 @@ +import numpy as np +from numba import cuda, float32, int32, void +from numba.core.errors import TypingError +from numba.cuda.testing import unittest, CUDATestCase +from numba.cuda.testing import skip_on_cudasim +from .extensions_usecases import test_struct_model_type + +GLOBAL_CONSTANT = 5 +GLOBAL_CONSTANT_2 = 6 +GLOBAL_CONSTANT_TUPLE = 5, 6 + + +def udt_global_constants(A): + sa = cuda.shared.array(shape=GLOBAL_CONSTANT, dtype=float32) + i = cuda.grid(1) + A[i] = sa[i] + + +def udt_global_build_tuple(A): + sa = cuda.shared.array(shape=(GLOBAL_CONSTANT, GLOBAL_CONSTANT_2), + dtype=float32) + i, j = cuda.grid(2) + A[i, j] = sa[i, j] + + +def udt_global_build_list(A): + sa = cuda.shared.array(shape=[GLOBAL_CONSTANT, GLOBAL_CONSTANT_2], + dtype=float32) + i, j = cuda.grid(2) + A[i, j] = sa[i, j] + + +def udt_global_constant_tuple(A): + sa = cuda.shared.array(shape=GLOBAL_CONSTANT_TUPLE, dtype=float32) + i, j = cuda.grid(2) + A[i, j] = sa[i, j] + + +def udt_invalid_1(A): + sa = cuda.shared.array(shape=A[0], dtype=float32) + i = cuda.grid(1) + A[i] = sa[i] + + +def udt_invalid_2(A): + sa = cuda.shared.array(shape=(1, A[0]), dtype=float32) + i, j = cuda.grid(2) + 
A[i, j] = sa[i, j] + + +def udt_invalid_3(A): + sa = cuda.shared.array(shape=(1, A[0]), dtype=float32) + i = cuda.grid(1) + A[i] = sa[i, 0] + + +class TestSharedMemoryCreation(CUDATestCase): + def getarg(self): + return np.array(100, dtype=np.float32, ndmin=1) + + def getarg2(self): + return self.getarg().reshape(1,1) + + def test_global_constants(self): + udt = cuda.jit((float32[:],))(udt_global_constants) + udt[1, 1](self.getarg()) + + def test_global_build_tuple(self): + udt = cuda.jit((float32[:, :],))(udt_global_build_tuple) + udt[1, 1](self.getarg2()) + + @skip_on_cudasim('Simulator does not prohibit lists for shared array shape') + def test_global_build_list(self): + with self.assertRaises(TypingError) as raises: + cuda.jit((float32[:, :],))(udt_global_build_list) + + self.assertIn("No implementation of function " + "Function(>> array(shape=list(int64), " + "dtype=class(float32)", + str(raises.exception)) + + def test_global_constant_tuple(self): + udt = cuda.jit((float32[:, :],))(udt_global_constant_tuple) + udt[1, 1](self.getarg2()) + + @skip_on_cudasim("Can't check for constants in simulator") + def test_invalid_1(self): + # Scalar shape cannot be a floating point value + with self.assertRaises(TypingError) as raises: + cuda.jit((float32[:],))(udt_invalid_1) + + self.assertIn("No implementation of function " + "Function(>> array(shape=float32, dtype=class(float32))", + str(raises.exception)) + + @skip_on_cudasim("Can't check for constants in simulator") + def test_invalid_2(self): + # Tuple shape cannot contain a floating point value + with self.assertRaises(TypingError) as raises: + cuda.jit((float32[:, :],))(udt_invalid_2) + + self.assertIn("No implementation of function " + "Function(>> array(shape=Tuple(Literal[int](1), " + "array(float32, 1d, A)), dtype=class(float32))", + str(raises.exception)) + + @skip_on_cudasim("Can't check for constants in simulator") + def test_invalid_3(self): + # Scalar shape must be literal + with 
self.assertRaises(TypingError) as raises: + cuda.jit((int32[:],))(udt_invalid_1) + + self.assertIn("No implementation of function " + "Function(>> array(shape=int32, dtype=class(float32))", + str(raises.exception)) + + @skip_on_cudasim("Can't check for constants in simulator") + def test_invalid_4(self): + # Tuple shape must contain only literals + with self.assertRaises(TypingError) as raises: + cuda.jit((int32[:],))(udt_invalid_3) + + self.assertIn("No implementation of function " + "Function(>> array(shape=Tuple(Literal[int](1), int32), " + "dtype=class(float32))", + str(raises.exception)) + + def check_dtype(self, f, dtype): + # Find the typing of the dtype argument to cuda.shared.array + annotation = next(iter(f.overloads.values()))._type_annotation + l_dtype = annotation.typemap['s'].dtype + # Ensure that the typing is correct + self.assertEqual(l_dtype, dtype) + + @skip_on_cudasim("Can't check typing in simulator") + def test_numba_dtype(self): + # Check that Numba types can be used as the dtype of a shared array + @cuda.jit(void(int32[::1])) + def f(x): + s = cuda.shared.array(10, dtype=int32) + s[0] = x[0] + x[0] = s[0] + + self.check_dtype(f, int32) + + @skip_on_cudasim("Can't check typing in simulator") + def test_numpy_dtype(self): + # Check that NumPy types can be used as the dtype of a shared array + @cuda.jit(void(int32[::1])) + def f(x): + s = cuda.shared.array(10, dtype=np.int32) + s[0] = x[0] + x[0] = s[0] + + self.check_dtype(f, int32) + + @skip_on_cudasim("Can't check typing in simulator") + def test_string_dtype(self): + # Check that strings can be used to specify the dtype of a shared array + @cuda.jit(void(int32[::1])) + def f(x): + s = cuda.shared.array(10, dtype='int32') + s[0] = x[0] + x[0] = s[0] + + self.check_dtype(f, int32) + + @skip_on_cudasim("Can't check typing in simulator") + def test_invalid_string_dtype(self): + # Check that strings of invalid dtypes cause a typing error + re = ".*Invalid NumPy dtype specified: 'int33'.*" + with 
self.assertRaisesRegex(TypingError, re): + @cuda.jit(void(int32[::1])) + def f(x): + s = cuda.shared.array(10, dtype='int33') + s[0] = x[0] + x[0] = s[0] + + @skip_on_cudasim("Can't check typing in simulator") + def test_type_with_struct_data_model(self): + @cuda.jit(void(test_struct_model_type[::1])) + def f(x): + s = cuda.shared.array(10, dtype=test_struct_model_type) + s[0] = x[0] + x[0] = s[0] + self.check_dtype(f, test_struct_model_type) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_userexc.py b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_userexc.py new file mode 100644 index 0000000000000000000000000000000000000000..2dca9c9f778d68168485b3a6457ce60e4f173c19 --- /dev/null +++ b/lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_userexc.py @@ -0,0 +1,47 @@ +from numba.cuda.testing import unittest, CUDATestCase +from numba import cuda +from numba.core import config + + +class MyError(Exception): + pass + + +regex_pattern = ( + r'In function [\'"]test_exc[\'"], file [\:\.\/\\\-a-zA-Z_0-9]+, line \d+' +) + + +class TestUserExc(CUDATestCase): + + def setUp(self): + super().setUp() + # LTO optimizes away the exception status due to an oversight + # in the way we generate it (it is not added to the used list). + # See https://github.com/numba/numba/issues/9526. 
+ self.skip_if_lto("Exceptions not supported with LTO") + + def test_user_exception(self): + @cuda.jit("void(int32)", debug=True) + def test_exc(x): + if x == 1: + raise MyError + elif x == 2: + raise MyError("foo") + + test_exc[1, 1](0) # no raise + with self.assertRaises(MyError) as cm: + test_exc[1, 1](1) + if not config.ENABLE_CUDASIM: + self.assertRegex(str(cm.exception), regex_pattern) + self.assertIn("tid=[0, 0, 0] ctaid=[0, 0, 0]", str(cm.exception)) + with self.assertRaises(MyError) as cm: + test_exc[1, 1](2) + if not config.ENABLE_CUDASIM: + self.assertRegex(str(cm.exception), regex_pattern) + self.assertRegex(str(cm.exception), regex_pattern) + self.assertIn("tid=[0, 0, 0] ctaid=[0, 0, 0]: foo", str(cm.exception)) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/python3.10/site-packages/tf2onnx/custom_opsets/__init__.py b/lib/python3.10/site-packages/tf2onnx/custom_opsets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..39e39fbd7ae75f9df6b7ef870e7abb7782476837 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/custom_opsets/__init__.py @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: Apache-2.0 + +""" custom tf2onnx mapping functions. """ + +from . import ms +from . import onnx_ml +from . import string_ops diff --git a/lib/python3.10/site-packages/tf2onnx/custom_opsets/ms.py b/lib/python3.10/site-packages/tf2onnx/custom_opsets/ms.py new file mode 100644 index 0000000000000000000000000000000000000000..aae2045ff360a26f49173814d4c500e2b27f84d5 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/custom_opsets/ms.py @@ -0,0 +1,115 @@ +# SPDX-License-Identifier: Apache-2.0 + +""" tf2onnx mapping functions for ms domain. 
""" + +import numpy as np + +from onnx import onnx_pb +from onnx.onnx_pb import TensorProto +from tf2onnx import constants, utils +from tf2onnx.handler import tf_op +from tf2onnx.onnx_opset import controlflow +from tf2onnx.onnx_opset.nn import conv_convert_inputs, conv_dims_attr + + +# pylint: disable=unused-argument,missing-docstring + +def make_range(ctx, start, limit, delta, output, scope_name, shape, dtype): + if all(ctx.get_node_by_output(n).is_const() for n in [start, limit, delta]) is True: + controlflow.make_range_const(ctx, start, limit, delta, output, scope_name, shape, dtype) + else: + _make_range_non_const(ctx, start, limit, delta, output, scope_name, shape, dtype) + + +def _make_range_non_const(ctx, start, limit, delta, output, scope_name, shape, dtype): + utils.make_sure( + dtype in [TensorProto.FLOAT, TensorProto.DOUBLE, TensorProto.INT16, + TensorProto.INT32, TensorProto.INT64, + TensorProto.COMPLEX64, TensorProto.COMPLEX128], + "dtype %s is not supported", dtype) + ctx.make_node("Range", [start, limit, delta], outputs=[output], name=scope_name, shapes=[shape], dtypes=[dtype], + domain=constants.MICROSOFT_DOMAIN) + + +@tf_op("Range", domain=constants.MICROSOFT_DOMAIN) +class Range: + @classmethod + def version_1(cls, ctx, node, **kwargs): + """Range.""" + # T range = Range(T start, T limit, T delta) + dtype = node.get_attr_int("Tidx") + shape = node.output_shapes[0] + utils.make_sure(dtype is not None, "Tidx of %s is None", node.name) + ctx.remove_node(node.name) + make_range(ctx, node.input[0], node.input[1], node.input[2], node.output[0], node.name, shape, dtype) + + +@tf_op("Conv2DBackpropInput", domain=constants.MICROSOFT_DOMAIN, onnx_op="ConvTransposeWithDynamicPads") +class ConvTransposeWithDynamicPads: + @classmethod + def version_1(cls, ctx, node, **kwargs): + # T output = Conv2DBackpropInput(int32 input_sizes, T filter, T out_backprop, + # @list(int) strides, @bool use_cudnn_on_gpu, @string padding, @string data_format, @list(int) 
dilations) + # T Y = ConvTranspose(T X, T W, T B, T pads, @STRING auto_pad, @INTS dilations, + # @INT group, @INTS kernel_shape, @INTS output_shape, @INTS strides) + + # tf uses "output_shape" while onnx uses "pads", the equation to calculate pads is: + # total_padding[i] = stride[i] * (input_shape[i] - 1)+ kernel_shape[i] - output_shape[i] + # pads[i_begin] = total_padding[i]/2 + # pads[i_end] = total_padding[i] - (total_padding[i]/2) + # output dtype of onnx "shape" is int64 while in tf dtype could be specified + utils.make_sure(node.is_nhwc(), "only support NHWC for now") + node.domain = constants.MICROSOFT_DOMAIN + input_shape = ctx.make_node("Shape", [node.input[2]]) + hw_indices = ctx.make_const(utils.make_name("hw_indices"), np.array([1, 2]).astype(np.int64)) + input_shape_hw = ctx.make_node("Gather", [input_shape.output[0], hw_indices.output[0]]) + output_shape = node.input[0] + if ctx.get_dtype(output_shape) != onnx_pb.TensorProto.INT64: + output_shape = ctx.make_node("Cast", [output_shape], attr={"to": onnx_pb.TensorProto.INT64}).output[0] + output_shape_hw = ctx.make_node("Gather", [output_shape, hw_indices.output[0]]) + kernel_shape_hw = list(ctx.get_shape(node.input[1]))[0:2] + kernel_shape = ctx.make_const(utils.make_name("const_convtrans"), np.array(kernel_shape_hw).astype(np.int64)) + strides = conv_dims_attr(node, "strides") + utils.make_sure(len(strides) == 2, "only stride of H and W needed") + + stride_node = ctx.make_const(utils.make_name("const_convtrans"), np.array(strides).astype(np.int64)) + const_one = ctx.make_const(utils.make_name("cosnt_one"), np.array([1]).astype(np.int64)) + const_two = ctx.make_const(utils.make_name("cosnt_two"), np.array([2]).astype(np.int64)) + + tmp0 = ctx.make_node("Sub", [input_shape_hw.output[0], const_one.output[0]]) + tmp1 = ctx.make_node("Mul", [stride_node.output[0], tmp0.output[0]]) + tmp2 = ctx.make_node("Add", [tmp1.output[0], kernel_shape.output[0]]) + total_pads = ctx.make_node("Sub", [tmp2.output[0], 
output_shape_hw.output[0]], + dtypes=[onnx_pb.TensorProto.INT64]) + pads_beg = ctx.make_node("Div", [total_pads.output[0], const_two.output[0]], dtypes=[onnx_pb.TensorProto.INT64]) + pads_end = ctx.make_node("Sub", [total_pads.output[0], pads_beg.output[0]]) + pads = ctx.make_node("Concat", [pads_beg.output[0], pads_end.output[0]], attr={"axis": 0}) + # set node's attrs, Note: output_padding, group are left default. + conv_dims_attr(node, "dilations") + # set node's inputs from (output_shape, filter, input_tensor) to (input_tensor, filter, pads, Bias) + ctx.replace_input(node, node.input[0], node.input[2], 0) + ctx.replace_input(node, node.input[2], pads.output[0], 2) + conv_convert_inputs(ctx, node, with_kernel=True) + node.attr.pop("data_format") + node.attr.pop("padding") + if "explicit_paddings" in node.attr: + node.attr.pop("explicit_paddings") + +@tf_op("CropAndResize", domain=constants.MICROSOFT_DOMAIN) +class CropAndResize: + @classmethod + def version_1(cls, ctx, node, **kwargs): + """ utilize contrib cropandresize """ + node.attr['method'].name = 'mode' + node.domain = constants.MICROSOFT_DOMAIN + ctx.insert_new_node_on_input(node, "Transpose", node.input[0], perm=constants.NHWC_TO_NCHW) + ctx.insert_new_node_on_output("Transpose", node.output[0], node.name + '_transposed', + None, perm=constants.NCHW_TO_NHWC) + +@tf_op("MatrixInverse", domain=constants.MICROSOFT_DOMAIN, onnx_op="Inverse") +class Inverse: + @classmethod + def version_1(cls, ctx, node, **kwargs): + utils.make_sure(node.get_attr('adjoint').i == 0, "adjoint must be false") + del node.attr["adjoint"] + node.domain = constants.MICROSOFT_DOMAIN diff --git a/lib/python3.10/site-packages/tf2onnx/custom_opsets/onnx_ml.py b/lib/python3.10/site-packages/tf2onnx/custom_opsets/onnx_ml.py new file mode 100644 index 0000000000000000000000000000000000000000..78675991a549d679ccdcd35bfc790659d460c01c --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/custom_opsets/onnx_ml.py @@ -0,0 +1,99 @@ +# 
SPDX-License-Identifier: Apache-2.0 + +""" tf2onnx mapping functions for onnx ml domain. """ +import logging +import numpy as np +from onnx import TensorProto +from onnx import numpy_helper +from tf2onnx import constants +from tf2onnx.handler import tf_op +from tf2onnx import utils + + +logger = logging.getLogger(__name__) + +# pylint: disable=unused-argument,missing-docstring,unnecessary-pass + +@tf_op("HashTableV2") +class HashTable: + @classmethod + def version_8(cls, ctx, node, **kwargs): + """ HashTable will be removed """ + pass + + +@tf_op("LookupTableFindV2") +class LookupTableFind: + @classmethod + def version_8(cls, ctx, node, initialized_tables, **kwargs): + """ convert lookup to category mapper """ + table_node = node.inputs[0] + while table_node.type == 'Identity': + table_node = table_node.inputs[0] + shared_name = table_node.get_attr_value("shared_name") + utils.make_sure(shared_name is not None, "Could not determine table shared name for node %s", node.name) + utils.make_sure(shared_name in initialized_tables, "Initialized table %s for node %s not found.", + shared_name, node.name) + + default_node = node.inputs[2] + utils.make_sure(default_node.is_const(), "Default value of table lookup must be const.") + default_val_np = default_node.get_tensor_value(as_list=False) + default_val = default_node.get_tensor_value() + + dtype = ctx.get_dtype(node.output[0]) + in_dtype = ctx.get_dtype(node.input[1]) + utils.make_sure(dtype == TensorProto.INT64 and in_dtype == TensorProto.STRING, + "Only lookup tables of type string->int64 are currently supported.") + + cats_strings, cats_int64s = initialized_tables[shared_name] + shape = ctx.get_shape(node.output[0]) + + node_name = node.name + node_inputs = node.input + node_outputs = node.output + + if node.inputs[1].is_const(): + # Handle explicitly since const folding doesn't work for tables + key_np = node.inputs[1].get_tensor_value(as_list=False) + ctx.remove_node(node.name) + key_to_val = dict(zip(cats_strings, 
cats_int64s)) + def lookup_value(key): + return key_to_val.get(key.encode("UTF-8"), default_val_np) + lookup_result = np.vectorize(lookup_value)(key_np) + onnx_tensor = numpy_helper.from_array(lookup_result, node_name) + ctx.make_node("Const", name=node_name, inputs=[], outputs=node_outputs, + attr={"value": onnx_tensor}, shapes=[lookup_result.shape], dtypes=[dtype]) + else: + ctx.remove_node(node.name) + ctx.make_node("CategoryMapper", domain=constants.AI_ONNX_ML_DOMAIN, + name=node_name, inputs=[node_inputs[1]], outputs=node_outputs, + attr={'cats_int64s': cats_int64s, 'cats_strings': cats_strings, 'default_int64': default_val}, + shapes=[shape], dtypes=[dtype]) + + customer_nodes = ctx.find_output_consumers(table_node.output[0]) + if len(customer_nodes) == 0: + ctx.remove_node(table_node.name) + + +@tf_op("LookupTableSizeV2") +class LookupTableSize: + @classmethod + def version_1(cls, ctx, node, initialized_tables, **kwargs): + table_node = node.inputs[0] + while table_node.type == 'Identity': + table_node = table_node.inputs[0] + shared_name = table_node.get_attr_value("shared_name") + utils.make_sure(shared_name is not None, "Could not determine table shared name for node %s", node.name) + utils.make_sure(shared_name in initialized_tables, "Initialized table %s for node %s not found.", + shared_name, node.name) + keys, _ = initialized_tables[shared_name] + + node_name = node.name + node_outputs = node.output + ctx.remove_node(node.name) + size_const = ctx.make_const(node_name, np.array(len(keys), dtype=np.int64)) + ctx.replace_all_inputs(node_outputs[0], size_const.output[0]) + + customer_nodes = ctx.find_output_consumers(table_node.output[0]) + if len(customer_nodes) == 0: + ctx.remove_node(table_node.name) diff --git a/lib/python3.10/site-packages/tf2onnx/custom_opsets/string_ops.py b/lib/python3.10/site-packages/tf2onnx/custom_opsets/string_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..3419203831d9aef026810e857d5011df202acecb 
--- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/custom_opsets/string_ops.py @@ -0,0 +1,170 @@ +# SPDX-License-Identifier: Apache-2.0 + +""" tf2onnx mapping functions for string ops using contrib ops domain. """ +import logging +import numpy as np +from onnx.onnx_pb import TensorProto + +from tf2onnx import constants, handler +from tf2onnx.handler import tf_op +from tf2onnx import utils +from tf2onnx.graph_builder import GraphBuilder + +logger = logging.getLogger(__name__) + +# pylint: disable=unused-argument,missing-docstring + +@tf_op(["StringSplit", "StringSplitV2"], domain=constants.CONTRIB_OPS_DOMAIN) +class StringOps: + @classmethod + def any_version(cls, opset, ctx, node, **kwargs): + if node.type == "StringSplit": + skip_empty = node.get_attr_value('skip_empty', True) + else: + skip_empty = False + node.type = "StringSplit" + node.domain = constants.CONTRIB_OPS_DOMAIN + for a in list(node.attr.keys()): + del node.attr[a] + unsqueeze_node = GraphBuilder(ctx).make_unsqueeze({'data': node.input[1], 'axes': [0]}, return_node=True) + + skip_empty_const = ctx.make_const(utils.make_name('skip_empty_const'), np.array([skip_empty], np.bool)) + ctx.replace_inputs(node, [node.input[0], unsqueeze_node.output[0], skip_empty_const.output[0]]) + + @classmethod + def version_1(cls, ctx, node, **kwargs): + cls.any_version(1, ctx, node, **kwargs) + + @classmethod + def version_13(cls, ctx, node, **kwargs): + cls.any_version(13, ctx, node, **kwargs) + + +@tf_op("StringToHashBucketFast", domain=constants.CONTRIB_OPS_DOMAIN) +class StringToHashBucketFast: + @classmethod + def version_1(cls, ctx, node, **kwargs): + node.domain = constants.CONTRIB_OPS_DOMAIN + num_buckets = node.get_attr_int('num_buckets') + num_buckets_const = ctx.make_const(utils.make_name('num_buckets'), np.array([num_buckets], dtype=np.int64)) + ctx.replace_inputs(node, [node.input[0], num_buckets_const.output[0]]) + del node.attr['num_buckets'] + +@tf_op("StaticRegexReplace", 
domain=constants.CONTRIB_OPS_DOMAIN) +class StaticRegexReplace: + @classmethod + def version_1(cls, ctx, node, **kwargs): + node.domain = constants.CONTRIB_OPS_DOMAIN + node.type = "StringRegexReplace" + pattern = node.get_attr_str("pattern") + rewrite = node.get_attr_str("rewrite") + utils.make_sure(node.get_attr_value("replace_global") != 0, + "Can not convert StaticRegexReplace if replace_global is False") + pattern_node = ctx.make_const(utils.make_name("pattern"), np.array([pattern], np.object)) + rewrite_node = ctx.make_const(utils.make_name("rewrite"), np.array([rewrite], np.object)) + del node.attr["pattern"] + del node.attr["rewrite"] + del node.attr["replace_global"] + ctx.replace_inputs(node, [node.input[0], pattern_node.output[0], rewrite_node.output[0]]) + +@tf_op("StringJoin", domain=constants.CONTRIB_OPS_DOMAIN) +class StringJoin: + @classmethod + def any_version(cls, opset, ctx, node, **kwargs): + node.domain = constants.CONTRIB_OPS_DOMAIN + separator = node.get_attr_value("separator") + if separator is None: + separator = b'' + separator = separator.decode('UTF-8') + separator_node = ctx.make_const(utils.make_name("separator"), np.array([separator], np.object)) + axis_node = ctx.make_const(utils.make_name("axis"), np.array([0], np.int64)) + inps_with_shapes = [i for i in node.input if ctx.get_shape(i) != []] + shape_node = None + if 0 < len(inps_with_shapes) < len(node.input): + shape_node = ctx.make_node("Shape", [inps_with_shapes[0]]) + unsqueezes = [] + for inp in node.input: + if ctx.get_shape(inp) == [] and shape_node is not None: + expand_node = ctx.make_node("Expand", [inp, shape_node.output[0]]) + inp = expand_node.output[0] + unsqueeze_node = GraphBuilder(ctx).make_unsqueeze({'data': inp, 'axes': [0]}) + unsqueezes.append(unsqueeze_node) + stack_node = ctx.make_node("Concat", unsqueezes, attr={'axis': 0}) + ctx.replace_inputs(node, [stack_node.output[0], separator_node.output[0], axis_node.output[0]]) + + @classmethod + def version_1(cls, 
ctx, node, **kwargs): + cls.any_version(1, ctx, node, **kwargs) + + @classmethod + def version_13(cls, ctx, node, **kwargs): + cls.any_version(13, ctx, node, **kwargs) + + +@tf_op(["Equal", "NotEqual"], domain=constants.CONTRIB_OPS_DOMAIN) +class StringEqual: + @classmethod + def version_1(cls, ctx, node, **kwargs): + dtype = ctx.get_dtype(node.input[0]) + if dtype != TensorProto.STRING: + # Fallback to normal domain conversion + func, _ = handler.tf_op.find_effective_op(node.type, constants.ONNX_DOMAIN) + func(ctx, node, **kwargs) + return + + need_not = node.type == "NotEqual" + node.type = "StringEqual" + node.domain = constants.CONTRIB_OPS_DOMAIN + if need_not: + output_name = node.output[0] + not_node = ctx.insert_new_node_on_output("Not", output_name, name=utils.make_name(node.name)) + ctx.copy_shape(output_name, not_node.output[0]) + ctx.copy_dtype(output_name, not_node.output[0]) + +@tf_op(["StringLower", "StringUpper"]) +class StringLower: + @classmethod + def version_10(cls, ctx, node, **kwargs): + if node.type == "StringLower": + case_action = "LOWER" + else: + case_action = "UPPER" + node.type = "StringNormalizer" + str_input = node.input[0] + rank = ctx.get_rank(node.input[0]) + shape = ctx.get_shape(node.input[0]) + if rank != 1: + ctx.insert_new_node_on_input(node, "Flatten", node.input[0], axis=0) + node.set_attr("case_change_action", case_action) + if rank != 1: + if shape is None or -1 in shape: + new_shape = ctx.make_node("Shape", [str_input]).output[0] + else: + new_shape = ctx.make_const(utils.make_name("shape"), np.array(shape, np.int64)).output[0] + ctx.insert_new_node_on_output("Reshape", node.output[0], inputs=[node.output[0], new_shape]) + +@tf_op("SentencepieceOp", domain=constants.CONTRIB_OPS_DOMAIN) +class SentencepieceOp: + @classmethod + def version_1(cls, ctx, node, **kwargs): + # This op will be removed when its consumer is converted + pass + +@tf_op("SentencepieceTokenizeOp", domain=constants.CONTRIB_OPS_DOMAIN) +class 
SentencepieceTokenizeOp: + @classmethod + def version_1(cls, ctx, node, **kwargs): + node.domain = constants.CONTRIB_OPS_DOMAIN + input_node = node.inputs[0] + utils.make_sure(input_node.type == "SentencepieceOp", "Input 0 to node %s is not SentencepieceOp", node.name) + ctx.remove_input(node, node.input[0], 0) + + nbest_size_cast = ctx.make_node("Cast", [node.input[1]], attr={'to': TensorProto.INT64}).output[0] + ctx.replace_input(node, node.input[1], nbest_size_cast, 1) + for i in range(1, len(node.input)): + unsqueeze = GraphBuilder(ctx).make_unsqueeze({'data': node.input[i], 'axes': [0]}) + ctx.replace_input(node, node.input[i], unsqueeze, i) + node.set_attr("model", input_node.attr['model'].s) + node.type = "SentencepieceTokenizer" + if ctx.is_safe_to_remove_nodes([input_node]): + ctx.remove_node(input_node.name) diff --git a/lib/python3.10/site-packages/tf2onnx/onnx_opset/__init__.py b/lib/python3.10/site-packages/tf2onnx/onnx_opset/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c3a4d9fe2e77307dff90a39b740e92bf2d3712b4 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/onnx_opset/__init__.py @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: Apache-2.0 + +"""tf2onnx.onnx_opset module""" + +from . 
import ( + common, + controlflow, + generator, + logical, + math, + misc, + nn, + quantize, + reduction, + rnn, + signal, + tensor, + traditionalml +) diff --git a/lib/python3.10/site-packages/tf2onnx/onnx_opset/common.py b/lib/python3.10/site-packages/tf2onnx/onnx_opset/common.py new file mode 100644 index 0000000000000000000000000000000000000000..77f6b8d3d92a2d47480f00fefd98d00662e6a437 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/onnx_opset/common.py @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +common +""" + +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import logging + +from tf2onnx import constants + + +logger = logging.getLogger(__name__) + +# pylint: disable=unused-argument,missing-docstring + +class BroadcastOp: + @classmethod + def version_1(cls, ctx, node, **kwargs): + """Elementwise Ops with broadcast flag.""" + if node.type == "AddV2": + node.type = "Add" + shape0 = ctx.get_shape(node.input[0]) + shape1 = ctx.get_shape(node.input[1]) + if shape0 != shape1: + node.set_attr("broadcast", 1) + # this works around shortcomings in the broadcasting code + # of caffe2 and winml/rs4. 
+ if ctx.is_target(constants.TARGET_RS4): + # in rs4 mul and add do not support scalar correctly + if not shape0: + if node.inputs[0].is_const(): + shape0 = node.inputs[0].scalar_to_dim1() + if not shape1: + if node.inputs[1].is_const(): + shape1 = node.inputs[1].scalar_to_dim1() + if shape0 and shape1 and len(shape0) < len(shape1) and node.type in ["Mul", "Add"]: + tmp = node.input[0] + ctx.replace_input(node, node.input[0], node.input[1], 0) + ctx.replace_input(node, node.input[1], tmp, 1) + else: + node.set_attr("broadcast", 0) + + @classmethod + def version_6(cls, ctx, node, **kwargs): + """Elementwise Ops with broadcast flag.""" + if node.type == "AddV2": + node.type = "Add" + shape0 = ctx.get_shape(node.input[0]) + shape1 = ctx.get_shape(node.input[1]) + if shape0 != shape1: + # this works around shortcomings in the broadcasting code + # of caffe2 and winml/rs4. + if ctx.is_target(constants.TARGET_RS4): + # in rs4 mul and add do not support scalar correctly + if not shape0: + if node.inputs[0].is_const(): + shape0 = node.inputs[0].scalar_to_dim1() + if not shape1: + if node.inputs[1].is_const(): + shape1 = node.inputs[1].scalar_to_dim1() + if shape0 and shape1 and len(shape0) < len(shape1) and node.type in ["Mul", "Add"]: + tmp = node.input[0] + ctx.replace_input(node, node.input[0], node.input[1], 0) + ctx.replace_input(node, node.input[1], tmp, 1) diff --git a/lib/python3.10/site-packages/tf2onnx/onnx_opset/controlflow.py b/lib/python3.10/site-packages/tf2onnx/onnx_opset/controlflow.py new file mode 100644 index 0000000000000000000000000000000000000000..f146f0b1520d8ba44c1a87f1d0d02e266f358a5d --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/onnx_opset/controlflow.py @@ -0,0 +1,672 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +controlflow +""" + +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import copy +import logging + +import numpy as np + +from onnx import onnx_pb +from 
onnx.onnx_pb import TensorProto +from tf2onnx import utils +from tf2onnx.handler import tf_op +from tf2onnx.tf_loader import find_function + + +logger = logging.getLogger(__name__) + + +# pylint: disable=unused-argument,missing-docstring + +def make_range_const(ctx, start, limit, delta, output, scope_name, shape, dtype): + """make Range subgraph if all inputs are const.""" + # T range = Range(T start, T limit, T delta) + # V v_final_and_scan_outputs = Loop(int64 M, B cond, V v_initial) + base_name = utils.make_name(scope_name) + start = ctx.get_node_by_output(start).get_tensor_value(as_list=False) + limit = ctx.get_node_by_output(limit).get_tensor_value(as_list=False) + delta = ctx.get_node_by_output(delta).get_tensor_value(as_list=False) + val = np.arange(start, limit, delta, dtype=start.dtype) + const_range = ctx.make_const(base_name, val) + ctx.make_node("Identity", [const_range.output[0]], shapes=[shape], dtypes=[dtype], outputs=[output]) + + +def make_range_non_const(ctx, start, limit, delta, output, scope_name, shape, dtype): + """make Range subgraph.""" + # T range = Range(T start, T limit, T delta) + # V v_final_and_scan_outputs = Loop(int64 M, B cond, V v_initial) + base_name = utils.make_name(scope_name) + + # trip_count + diff_node = ctx.make_node("Sub", + [limit, start], + op_name_scope=base_name, + name=utils.make_name("diff")) + diff_output = diff_node.output[0] + + delta_cast = delta + if dtype in [TensorProto.INT32, TensorProto.INT64]: + cast_node = ctx.make_node("Cast", [diff_output], op_name_scope=base_name, + name="cast_diff", attr={"to": TensorProto.FLOAT}) + diff_output = cast_node.output[0] + + cast_node = ctx.make_node("Cast", [delta], op_name_scope=base_name, name="cast_delta", + attr={"to": TensorProto.FLOAT}) + delta_cast = cast_node.output[0] + div_node = ctx.make_node("Div", [diff_output, delta_cast], op_name_scope=base_name, name="div") + ceil_node = ctx.make_node("Ceil", [div_node.output[0]], op_name_scope=base_name, name="ceil") + 
trip_count_node = ctx.make_node("Cast", [ceil_node.output[0]], op_name_scope=base_name, name="trip_cnt", + attr={"to": TensorProto.INT64}) + + # cond + # Use initializer here since Constant OP before opset 9 does not support bool type + cond_name = "{}_cond".format(base_name) + ctx.make_const(cond_name, np.ones((), dtype=bool)) + + # body + g = ctx.create_new_graph_with_same_config() + g.parent_graph = ctx + g.add_graph_input("i", TensorProto.INT64, []) + g.add_graph_input("cond", TensorProto.BOOL, []) + g.add_graph_input("prev", dtype, []) + + g.make_node("Identity", ["cond"], outputs=["cond_out"]) + g.make_node("Add", ["prev", delta], outputs=["current"], name=utils.make_name("add")) + g.make_node("Identity", ["prev"], outputs=["range"]) + + g.add_graph_output("cond_out", TensorProto.BOOL, []) + g.add_graph_output("current", dtype, []) + g.add_graph_output("range", dtype, []) + + # loop + loop_inputs = [trip_count_node.output[0], cond_name, start] + branches = {"body": g} + loop_node = ctx.make_node("Loop", loop_inputs, + output_count=2, op_name_scope=base_name, name="loop", branches=branches) + + ctx.make_node("Identity", [loop_node.output[1]], name=base_name, shapes=[shape], dtypes=[dtype], outputs=[output]) + + +def make_range(ctx, start, limit, delta, output, scope_name, shape, dtype): + if all(ctx.get_node_by_output(n).is_const() for n in [start, limit, delta]) is True: + make_range_const(ctx, start, limit, delta, output, scope_name, shape, dtype) + else: + make_range_non_const(ctx, start, limit, delta, output, scope_name, shape, dtype) + + +@tf_op(["Loop", "Scan"]) +class PassThroughOp: + @classmethod + def version_7(cls, ctx, node, **kwargs): + pass + + @classmethod + def version_11(cls, ctx, node, **kwargs): + # no change needed + # loop has 1 less mandatory input + # if = only doc changes + # scan has 1 less mandatory input and 4 extra attrs + pass + + +@tf_op("Range") +class Range: + @classmethod + def version_7(cls, ctx, node, **kwargs): + """Range.""" 
+ # T range = Range(T start, T limit, T delta) + # V v_final_and_scan_outputs = Loop(int64 M, B cond, V v_initial) + dtype = node.get_attr_int("Tidx") + shape = node.output_shapes[0] + utils.make_sure(dtype is not None, "Tidx of %s is None", node.name) + ctx.remove_node(node.name) + make_range(ctx, node.input[0], node.input[1], node.input[2], + node.output[0], node.name, shape, dtype) + + @classmethod + def version_11(cls, ctx, node, **kwargs): + # opset 11 implements Range op explicitly + pass + + +@tf_op(["Select", "SelectV2"]) +class Select: + @classmethod + def version_7(cls, ctx, node, **kwargs): + # T output = Select(bool condition, T x, T y) + # Select_res = Add(Multiply(Cast(bool condition, T), T x,), + # Multiply(Cast(Not(bool condition), T), T y)). + # TODO: Fix case where condition is 1-dimensional + utils.make_sure(len(node.input) > 1, "Select with only condition is not supported.") + dtype = ctx.get_dtype(node.output[0]) + utils.make_sure(dtype != TensorProto.STRING, "Select with dtype string requires opset 9") + + cond_shape = ctx.get_shape(node.input[0]) + input_shape = ctx.get_shape(node.input[1]) + if input_shape is None: + input_shape = ctx.get_shape(node.input[2]) + input_rank = len(input_shape) if input_shape is not None else None + cond_rank = len(cond_shape) if cond_shape is not None else None + # if cond shape is 1-dimensional while input has higher rank, need to be reshaped to broadcast + if node.type == "Select" and cond_rank == 1 and input_rank != 1: + utils.make_sure(input_rank is not None, "input_rank unknown and cond_rank == 1") + broadcast_shape = [cond_shape[0]] + [1] * (input_rank - 1) + shape_const = ctx.make_const(utils.make_name(node.name), np.array(broadcast_shape, dtype=np.int64)) + reshape = ctx.make_node("Reshape", [node.input[0], shape_const.output[0]]) + ctx.replace_input(node, node.input[0], reshape.output[0], 0) + + positive_cast = ctx.make_node("Cast", [node.input[0]], name=utils.make_name(node.name), + attr={"to": 
dtype}) + negative = ctx.make_node("Not", [node.input[0]], name=utils.make_name(node.name)) + negative_cast = ctx.make_node("Cast", [negative.output[0]], name=utils.make_name(node.name), + attr={"to": dtype}) + multiply_1 = ctx.make_node("Mul", [positive_cast.output[0], node.input[1]], name=utils.make_name(node.name)) + multiply_2 = ctx.make_node("Mul", [node.input[2], negative_cast.output[0]], name=utils.make_name(node.name)) + add_name = node.name + add_out = node.output + shape = ctx.get_shape(node.output[0]) + ctx.remove_node(node.name) + ctx.make_node("Add", [multiply_1.output[0], multiply_2.output[0]], outputs=add_out, name=add_name, + dtypes=[dtype], shapes=[shape]) + + @classmethod + def version_9(cls, ctx, node, **kwargs): + # T output = Select(bool condition, T x, T y) + # T1 output = Where(bool condition, T1 x, T1 y) + # NOTE: condition can be 1-dimension in tensorflow, while in onnx, + # it should be broadcastable with other two inputs + if ctx.get_dtype(node.output[0]) != TensorProto.STRING: + # Due to bad ORT implementation, Mul/Add ops are faster than Where op + cls.version_7(ctx, node, **kwargs) + return + + cond_shape = ctx.get_shape(node.input[0]) + input_shape = ctx.get_shape(node.input[1]) + if input_shape is None: + input_shape = ctx.get_shape(node.input[2]) + input_rank = len(input_shape) if input_shape is not None else None + cond_rank = len(cond_shape) if cond_shape is not None else None + # if cond shape is 1-dimensional while input has higher rank, need to be reshaped to broadcast + if node.type == "Select" and cond_rank == 1 and input_rank != 1: + utils.make_sure(input_rank is not None, "input_rank unknown and cond_rank == 1") + broadcast_shape = [cond_shape[0]] + [1] * (input_rank - 1) + shape_const = ctx.make_const(utils.make_name(node.name), np.array(broadcast_shape, dtype=np.int64)) + reshape = ctx.make_node("Reshape", [node.input[0], shape_const.output[0]]) + ctx.replace_input(node, node.input[0], reshape.output[0], 0) + node.type = 
"Where" + + +@tf_op("Where") +class Where: + @classmethod + def version_9(cls, ctx, node, **kwargs): + # T_y output = Where(T_x condition), return indices of elements whose value are True + node.type = "NonZero" + # in onnx, indices are returned in this way [[ind_a_0, ind_b_0, ...], [ind_a_1, ind_b_1,...]]; + # while in tf, the result will be [[ind_a_0, ind_a_1, ...], [ind_b_0, ind_b_1, ...], ...] + # this is the reason a transpose node inserted here. + transpose_node = ctx.insert_new_node_on_output("Transpose", + node.output[0], name=utils.make_name("where_op_added")) + ctx.copy_shape(node.output[0], transpose_node.output[0]) + ctx.copy_dtype(node.output[0], transpose_node.output[0]) + + +@tf_op(["StatelessIf"]) +class StatelessIfOp: + @classmethod + def version_1(cls, ctx, node, **kwargs): + """V2 control flow - If""" + inputs = node.input[1:] + + output_shapes = node.output_shapes + output_dtypes = node.output_dtypes + ctx.remove_node(node.name) + + # replace the original node + branches = {} + for branch in ["then_branch", "else_branch"]: + func_name = node.get_attr_str(branch) + g = find_function(func_name) + g.parent_graph = ctx + wire_if_branch(ctx, g, inputs, output_shapes, output_dtypes, func_name, node.name) + branches[branch] = g + + _ = ctx.make_node("If", node.input[:1], name=node.name, output_count=len(output_shapes), + shapes=output_shapes, dtypes=output_dtypes, skip_conversion=True, branches=branches) + + +@tf_op(["If"]) +class IfOp: + @classmethod + def version_1(cls, ctx, node, **kwargs): + """V2 control flow - If""" + inputs = node.input[1:] + + if node.type == "If" and len(inputs) == 0: + # this comes from the re-writers + return + + output_shapes = node.output_shapes + output_dtypes = node.output_dtypes + ctx.remove_node(node.name) + + # replace the original node + branches = {} + for branch in ["then_branch", "else_branch"]: + func_name = node.get_attr_str(branch) + g = find_function(func_name) + g.parent_graph = ctx + wire_if_branch(ctx, g, 
inputs, output_shapes, output_dtypes, func_name, node.name) + branches[branch] = g + + _ = ctx.make_node("If", node.input[:1], name=node.name, output_count=len(output_shapes), + shapes=output_shapes, dtypes=output_dtypes, outputs=node.output, skip_conversion=True, + branches=branches) + + +@tf_op(["TensorListSetItem"]) +class TensorListSetItem: + @classmethod + def version_7(cls, ctx, node, **kwargs): + # handled in 'While' + pass + + +@tf_op(["TensorListGetItem"]) +class TensorListGetItem: + @classmethod + def version_7(cls, ctx, node, **kwargs): + ctx.ta_reads.append(node.input[0]) + node.type = "Gather" + ctx.replace_inputs(node, [node.input[0], node.input[1]]) + + @classmethod + def version_13(cls, ctx, node, **kwargs): + cls.version_7(ctx, node, **kwargs) + + +@tf_op(["TensorListLength"]) +class TensorListLength: + @classmethod + def version_7(cls, ctx, node, **kwargs): + pass + + +@tf_op(["TensorListReserve", "TensorListResize"]) +class TensorListReserve: + @classmethod + def version_7(cls, ctx, node, **kwargs): + pass + + +@tf_op(["TensorListFromTensor"]) +class TensorListFromTensor: + @classmethod + def version_7(cls, ctx, node, **kwargs): + consumers = ctx.find_output_consumers(node.output[0]) + if any([c.is_while() for c in consumers]): + node.type = "Identity" + ctx.copy_dtype(node.input[0], node.output[0]) + ctx.copy_shape(node.input[0], node.output[0]) + + +@tf_op(["TensorListStack"]) +class TensorListStack: + @classmethod + def version_7(cls, ctx, node, **kwargs): + if node.inputs[0].is_while(): + ctx.remove_node(node.name) + ctx.replace_all_inputs(node.output[0], node.input[0]) # ops=ctx.get_nodes() + + +@tf_op(["While", "StatelessWhile"]) +class While: + @classmethod + def version_7(cls, ctx, node, **kwargs): + # the tensorflow while input is: + # loop_counter, max_iterations, [loop_vars] + # cond and body use the same inputs + # outputs are identical to inputs + tf_while_inputs = node.input + + # the onnx loop input is: + # max_iterations, cond, 
[loop_vars] + # body uses the inputs: + # iteration, cond, [loop_vars] + # the onnx loop output is: + # cond [v_final_and_scan_outputs] + + output_shapes = node.output_shapes + output_dtypes = node.output_dtypes + # node.output must be copied as some element + # may be removed from output_names below + output_names = node.output.copy() + + # Make maximum_iterations int64 and replace -1(tf) with maxsize(onnx). If the const node has no other + # consumers, modify it in place. Otherwise, make a new const node and leave the original unchanged. + # if maximum_iterations is not const,should add an cast node(cast to int64) + maximum_iterations_name = node.input[1] + if node.inputs[1].is_const(): + maximum_iterations = node.inputs[1].get_tensor_value() + if maximum_iterations == -1: + maximum_iterations = np.iinfo(np.int64).max + consumers = ctx.find_output_consumers(maximum_iterations_name) + external_consumers = [c for c in consumers if c != node and c.type != 'TensorListReserve'] + if len(external_consumers) == 0: + ctx.remove_node(node.inputs[1].name) + else: + maximum_iterations_name = utils.make_name(node.inputs[1].name) + ctx.make_const(maximum_iterations_name, np.array(maximum_iterations, dtype=np.int64)) + ctx.replace_input(node, node.input[1], maximum_iterations_name, 1) + maximum_iterations_int64 = maximum_iterations_name + else: + cast_inputs = [maximum_iterations_name] + attr = {"to": onnx_pb.TensorProto.INT64} + cast_name = node.name + "_cast" + cast_node = ctx.make_node("Cast", cast_inputs, attr, name=cast_name) + maximum_iterations_int64 = cast_node.output[0] + + cond_name = node.get_attr_str("cond") + cond_graph = find_function(cond_name) + cond_graph.parent_graph = ctx + + body_name = node.get_attr_str("body") + body = find_function(body_name) + body.parent_graph = ctx + + loop_vars = [] # passed into the loop + body_input_to_state_var = {} # Map from body input name to state var name + cond_input_to_state_var = {} + to_remove = [] + input_idx_to_remove = 
[] + # remove TensorListReserve + for idx, name in enumerate(tf_while_inputs): + if idx == 1: + # onnx does not know maximum_iterations in the body so move this to a state var + body_input_to_state_var[body.input_names[idx]] = maximum_iterations_name + cond_input_to_state_var[cond_graph.input_names[idx]] = maximum_iterations_name + continue + if idx < 2: + # skip [0,1] loop_counter, max_iterations + continue + n = node.inputs[idx] + if n.type in ["TensorListReserve", "TensorListResize"]: + # there is no equivalent step in onnx and we should remove it. + to_remove.append((idx, n)) + continue + + # tensor arrays we read from can't be loop_vars and we fetch them from the outer context instead + if body.input_names[idx] in body.ta_reads: + body_input_to_state_var[body.input_names[idx]] = name + cond_input_to_state_var[cond_graph.input_names[idx]] = name + input_idx_to_remove.append(idx) + else: + loop_vars.append(name) + + # loop_vars that become state_vars need to be removed from output as well + for idx in reversed(input_idx_to_remove): + del output_shapes[idx] + del output_dtypes[idx] + del output_names[idx] + del body.outputs[idx] + + scan_output_names = [] + # remove tensor array that are passed in to the loop + for idx, n in reversed(to_remove): + ctx.remove_node(n.name) + # make the node output bad + ctx.replace_all_inputs(n.output[0], "@@ALLOC") # ops=ctx.get_nodes() + del body.inputs[idx] + del cond_graph.inputs[idx] + del tf_while_inputs[idx] + scan_output_names.append(body.outputs[idx]) + del body.outputs[idx] + output_shapes.append(output_shapes[idx]) + output_dtypes.append(output_dtypes[idx]) + output_names.append(output_names[idx]) + del output_shapes[idx] + del output_dtypes[idx] + del output_names[idx] + + ctx.remove_node(node.name) + + # In onnx 'cond' is a variable, not a function. We need to inject the subgraph into the main graph + # before the loop and into the body. 
+ cond_binding = parameter_binding(cond_graph, tf_while_inputs) + cond_outputs = inline_subgraph(ctx, cond_graph, cond_name, cond_binding) + # onnx Loop op outputs only loop_vars so we need shift output dtypes/shapes and consumers + output_shapes = output_shapes[2:] + output_dtypes = output_dtypes[2:] + output_names = output_names[2:] + + branches = {"body": body} + loop_node = ctx.make_node("Loop", [maximum_iterations_int64, cond_outputs[0]] + loop_vars, + output_count=len(output_shapes), name=node.name + "_loop", + shapes=output_shapes, dtypes=output_dtypes, skip_conversion=True, + branches=branches) + + output_map = dict(zip(output_names, loop_node.output)) + + # shift output consumers + for k, v in output_map.items(): + ctx.replace_all_inputs(k, v) # ops=ctx.get_nodes() + + wire_while_body(ctx, body, loop_node.inputs, body_input_to_state_var, cond_input_to_state_var, output_shapes, + output_dtypes, body_name, node.name, cond_graph, tf_while_inputs, scan_output_names) + + # if there was a tensorflow variant type, bind in a real type here + # FIXME: I don't think this is needed anymore + for i, n in enumerate(body.inputs): + if body.get_dtype(n.output[0]) == onnx_pb.TensorProto.UNDEFINED: + body.set_dtype(n.output[0], ctx.get_dtype(loop_node.input[i])) + + +def wire_while_body(parent_g, g, loop_node_inputs, body_input_to_state_var, cond_input_to_state_var, output_shapes, + output_dtypes, scope, parent, cond_graph, tf_while_inputs, scan_output_names): + """Wire subgraph graph into main.""" + remove_parents = [] + to_remove = [] + + # tensorflow function inputs that are state_vars come from outer context and + # we need to remove them from the inputs by making the placeholder an identity + for n in g.inputs: + if n.output[0] in body_input_to_state_var: + n.type = "Identity" + g.replace_inputs(n, [body_input_to_state_var[n.output[0]]]) + + # onnx will pass in cond as argument + cond_node = g.make_node("Placeholder", [], name=utils.make_name("cond"), + 
output_count=1, dtypes=[onnx_pb.TensorProto.BOOL], shapes=[[]]) + + # in onnx the body inputs are: index, cond, [loop_vars] + func_inputs = [i for i in g.input_names[2:] if i not in body_input_to_state_var] + func_inputs = [g.input_names[0], cond_node.output[0]] + func_inputs + g.set_dtype(func_inputs[0], onnx_pb.TensorProto.INT64) + g.inputs = [g.get_node_by_output(inp) for inp in func_inputs] + + for p, c in zip(loop_node_inputs, func_inputs): + shape = p.output_shapes[0] + g.set_shape(c, shape) + + for i, node in enumerate(g.inputs): + if node.output[0] not in func_inputs: + remove_parents.append(node.output[0]) + + # this is a tensor array write - make it an identity + scan_outputs = [] + for node in g.get_nodes(): + if node.type == "TensorListSetItem": + remove_parents.append(node.input[0]) + node.type = "Identity" + g.set_shape(node.output[0], g.get_shape(node.input[2])) + g.set_dtype(node.output[0], g.get_dtype(node.input[2])) + g.replace_inputs(node, [node.input[2]]) + scan_outputs.append(node.output[0]) + + if len(scan_outputs) != len(scan_output_names): + raise ValueError("While loop couldn't find scan output index for nodes") + + names_to_scan_outputs = {} + for output in scan_outputs: + last_output = output + consumers = g.find_output_consumers(last_output) + while consumers: + node = consumers[0] + if node.type != "Identity": + raise ValueError("While loop couldn't find scan output index for node " + node.name) + last_output = node.output[0] + consumers = g.find_output_consumers(last_output) + if last_output not in scan_output_names: + raise ValueError("While loop couldn't find scan output index for node " + node.name) + names_to_scan_outputs[last_output] = output + + # Reorder scan outputs + scan_outputs = [names_to_scan_outputs[name] for name in scan_output_names] + + # remove all nodes feeding to TensorListSetItem's reserved tensor + while remove_parents: + output_name = remove_parents[0] + del remove_parents[0] + node = 
g.get_node_by_output(output_name) + if node: + if output_name not in func_inputs: + if node.input: + remove_parents.extend(node.input) + g.remove_node(node.name) + + for node in to_remove: + g.remove_node(node.name) + + cond_binding = parameter_binding(cond_graph, func_inputs[:1] + g.outputs[2:], cond_input_to_state_var) + cond_outputs = inline_subgraph(g, cond_graph, "cond__", cond_binding) + + g.outputs = [cond_outputs[0]] + g.outputs[2:] + scan_outputs + + # FIXME: onnx does not have a variant type so we try to fish for the dtype in a prior TensorListSetItem. + for o in g.outputs: + if g.get_dtype(o) == onnx_pb.TensorProto.UNDEFINED: + node = g.get_node_by_output(o) + if node.type in ["Identity"]: + g.set_dtype(o, node.inputs[0].output_dtypes[0]) + + return g + + +def wire_if_branch(parent_g, g, inputs, output_shapes, output_dtypes, scope, parent): + """Wire subgraph graph into main.""" + binding = parameter_binding(g, inputs) + to_remove = [] + for node in g.inputs: + parent_name = binding.get(node.output[0]) + if parent_name and parent_name != "@@ALLOC": + g.replace_inputs(node, [parent_name]) + node.type = "Identity" + else: + to_remove.append(node) + + for node in to_remove: + g.remove_node(node.name) + + prefix_graph(g, scope) + + for shape, dtype, output_name in zip(output_shapes, output_dtypes, g.outputs): + g.set_shape(output_name, shape) + g.set_dtype(output_name, dtype) + + return g + + +def inline_subgraph(parent, g, scope, binding): + # make a copy since we don't want to change the origianl graph + g = copy.deepcopy(g) + to_remove = [] + for node in g.inputs: + parent_name = binding.get(node.output[0]) + if parent_name and parent_name != "@@ALLOC": + g.replace_inputs(node, [parent_name]) + node.type = "Identity" + else: + to_remove.append(node) + for node in to_remove: + g.remove_node(node.name) + prefix_graph(g, scope) + for n in g.get_nodes(): + dtypes = n.output_dtypes + shapes = n.output_shapes + n.graph = parent + for name, shape, dtype in 
zip(n.output, shapes, dtypes): + # FIXME: don't access this directly + parent._output_shapes[name] = shape # pylint: disable=protected-access + parent._dtypes[name] = dtype # pylint: disable=protected-access + + ops = parent.get_nodes() + g.get_nodes() + parent.reset_nodes(ops) + + # copy output shape and dtype to parent graph + for name in g.outputs: + parent.set_dtype(name, g.get_dtype(name)) + parent.set_shape(name, g.get_shape(name)) + + return g.outputs + + +def parameter_binding(g, inputs, state_vars=None): + binding = {} + i = 0 + for k in g.input_names: + if state_vars and k in state_vars: + binding[k] = state_vars[k] + else: + binding[k] = inputs[i] + i += 1 + utils.make_sure(i == len(inputs), "Parameter count mismatch while binding controlflow") + return binding + + +def prefix_graph(g, scope): + ops = g.get_nodes()[:] + to_remove = [] + for node in ops: + output_shapes = node.output_shapes + output_dtypes = node.output_dtypes + attr = node.attr + if node.is_graph_input(): + continue + branches = {} + attr_graphs = node.get_body_graphs() + if attr_graphs: + for k, v in attr_graphs.items(): + branches[k] = v + new_node = g.make_node(node.type, node.input, name=node.name, output_count=len(node.output), + shapes=output_shapes, dtypes=output_dtypes, attr=attr, + op_name_scope=scope, skip_conversion=True, branches=branches) + for old_output, new_output in zip(node.output, new_node.output): + for i, oname in enumerate(g.outputs): + if old_output == oname: + g.outputs[i] = new_output + break + g.replace_all_inputs(old_output, new_output, ops=ops) + to_remove.append(node) + for node in to_remove: + g.remove_node(node.name) + + +def dump_graph(g): + print() + print("--, graph=", g.graph_name) + t = ["{} {}/{}".format(n.name, g.get_shape(n.output[0]), g.get_dtype(n.output[0])) for n in g.inputs] + print("--, inputs=", ", ".join(t)) + t = ["{} {}/{}".format(n, g.get_shape(n), g.get_dtype(n)) for n in g.outputs] + print("--, outputs=", ", ".join(t)) + for node in 
g.get_nodes(): + input_names = ", ".join(["{} {}/{}".format(n, g.get_shape(n), g.get_dtype(n)) for n in node.input]) + output_names = ", ".join(["{} {}/{}".format(n, g.get_shape(n), g.get_dtype(n)) for n in node.output]) + print("-- {} n={} i={} o={}".format(node.type, node.name, input_names, output_names)) diff --git a/lib/python3.10/site-packages/tf2onnx/onnx_opset/generator.py b/lib/python3.10/site-packages/tf2onnx/onnx_opset/generator.py new file mode 100644 index 0000000000000000000000000000000000000000..0daf4fb7a07e7999e888e00b6d212a890436a827 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/onnx_opset/generator.py @@ -0,0 +1,254 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +generator +""" + +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import logging + +import numpy as np +from onnx import onnx_pb, numpy_helper +from tf2onnx import utils +from tf2onnx.handler import tf_op +from tf2onnx.graph_builder import GraphBuilder + +logger = logging.getLogger(__name__) + + +# pylint: disable=unused-argument,missing-docstring + +@tf_op(["Const", "ConstV2"]) +class DirectOp: + @classmethod + def version_1(cls, ctx, node, **kwargs): + pass + + +@tf_op(["RandomNormal", "RandomUniform", "RandomUniformInt"]) +class RandomOp: + @classmethod + def randuniform_int(cls, ctx, rand_node, rand_out, min_inp, max_inp): + dtype = ctx.get_dtype(rand_out) + min_node = ctx.get_node_by_output(min_inp) + max_node = ctx.get_node_by_output(max_inp) + if min_node.is_const() and max_node.is_const(): + rand_node.set_attr('low', float(min_node.get_tensor_value())) + rand_node.set_attr('high', float(max_node.get_tensor_value())) + out = rand_out + elif min_node.is_const() and min_node.get_tensor_value() == 0: + max_float = ctx.make_node("Cast", [max_inp], attr={'to': onnx_pb.TensorProto.FLOAT}).output[0] + mul_node = ctx.insert_new_node_on_output("Mul", rand_out, inputs=[rand_out, max_float]) + out = 
mul_node.output[0] + else: + min_float = ctx.make_node("Cast", [min_inp], attr={'to': onnx_pb.TensorProto.FLOAT}).output[0] + max_float = ctx.make_node("Cast", [max_inp], attr={'to': onnx_pb.TensorProto.FLOAT}).output[0] + diff = ctx.make_node("Sub", [max_float, min_float]).output[0] + diff_float = ctx.make_node("Cast", [diff], attr={'to': onnx_pb.TensorProto.FLOAT}).output[0] + mul_node = ctx.insert_new_node_on_output("Mul", rand_out, inputs=[rand_out, diff_float]) + mul = mul_node.output[0] + add_node = ctx.insert_new_node_on_output("Add", mul, inputs=[mul, min_float]) + out = add_node.output[0] + floor_node = ctx.insert_new_node_on_output("Floor", out) + ctx.insert_new_node_on_output("Cast", floor_node.output[0], to=dtype) + + @classmethod + def version_1(cls, ctx, node, **kwargs): + # in tf-2.0 grappler optimizes the graph pretty well and our matching logic + # in the rewriter does not trigger. grappler will send the random uniform + # with shape as input so we need to pickup the input here and if the shape is + # const we make it an attribute. 
+ seed = node.get_attr("seed") + node.set_attr("seed", float(seed.f)) + utils.make_sure(node.inputs[0].is_const(), "%s node with non-const shape requires opset >= 9") + shape = node.inputs[0].get_tensor_value() + ctx.remove_input(node, node.input[0], 0) + if len(shape) == 0: + # ORT can't take an empty shape (scalar) + node.set_attr("shape", [1]) + ctx.set_shape(node.output[0], [1]) + squeeze_node = GraphBuilder(ctx).make_squeeze({'data': node.output[0], 'axes': [0]}, return_node=True) + ctx.insert_node_on_output(squeeze_node, node.output[0]) + rand_out = squeeze_node.output[0] + else: + node.set_attr("shape", shape) + ctx.set_shape(node.output[0], shape) + rand_out = node.output[0] + if node.type == "RandomUniformInt": + cls.randuniform_int(ctx, node, rand_out, node.input[0], node.input[1]) + node.type = "RandomUniform" + ctx.replace_inputs(node, []) + + @classmethod + def version_9(cls, ctx, node, **kwargs): + if node.inputs[0].is_const(): + cls.version_1(ctx, node, **kwargs) + else: + seed = node.get_attr("seed") + node.set_attr("seed", float(seed.f)) + cast_node = ctx.make_node("Cast", [node.input[0]], attr={'to': onnx_pb.TensorProto.INT64}) + const_node = ctx.make_node("ConstantOfShape", cast_node.output) + inputs = node.input.copy() + ctx.replace_inputs(node, const_node.output.copy()) + if node.type == "RandomUniformInt": + cls.randuniform_int(ctx, node, node.output[0], inputs[1], inputs[2]) + node.type = "RandomUniformLike" + else: + node.type = node.type + 'Like' + + +@tf_op(["RandomNormalLike", "RandomUniformLike"]) +class PassThroughOp: + @classmethod + def version_1(cls, ctx, node, **kwargs): + pass + +@tf_op("Fill") +class Fill: + @classmethod + def version_7(cls, ctx, node, **kwargs): + # T output = Fill(int32 dims, T value, @int32 index_type) + # T outputs = Tile(T value, int64 repeats (e.g. 
dims)) + fill_shape = ctx.get_shape(node.input[0]) + utils.make_sure(fill_shape is not None, "shape of {} is None".format(node.input[0])) + fill_shape_dims = fill_shape[0] + utils.make_sure(fill_shape_dims > 0, "opset 7 requires fill shape length > 0, or please try opset > 7") + val_dtype = ctx.get_dtype(node.input[1]) + val_shape = ctx.get_shape(node.input[1]) + + need_cast = val_dtype != onnx_pb.TensorProto.FLOAT and ctx.opset < 9 + new_dtype = val_dtype + if need_cast: + new_dtype = onnx_pb.TensorProto.FLOAT + attr = {"to": new_dtype} + cast_to_float = ctx.insert_new_node_on_input(node, "Cast", node.input[1], name=None, **attr) + ctx.set_dtype(cast_to_float.output[0], new_dtype) + ctx.set_shape(cast_to_float.output[0], val_shape) + + for _ in range(fill_shape_dims): + attr = {"axes": [0]} + shape = ctx.get_shape(node.input[1]) + unsqueeze_node = ctx.insert_new_node_on_input(node, "Unsqueeze", node.input[1], name=None, **attr) + ctx.set_dtype(unsqueeze_node.output[0], new_dtype) + if shape: + shape = [1] + shape + else: + shape = [1] + ctx.set_shape(unsqueeze_node.output[0], shape) + + # Tile's repeats must be INT64 + attr = {"to": onnx_pb.TensorProto.INT64} + tile_shape_int64 = ctx.insert_new_node_on_input(node, "Cast", node.input[0], name=None, **attr) + ctx.set_dtype(tile_shape_int64.output[0], onnx_pb.TensorProto.INT64) + ctx.set_shape(tile_shape_int64.output[0], fill_shape) + + tmp = node.input[0] + ctx.replace_input(node, node.input[0], node.input[1], 0) + ctx.replace_input(node, node.input[1], tmp, 1) + node.type = "Tile" + ctx.set_dtype(node.output[0], new_dtype) + + if need_cast: + attr = {"to": val_dtype} + op_name = utils.make_name(node.name + "/cast_back") + cast_back = ctx.insert_new_node_on_output("Cast", node.output[0], name=op_name, **attr) + ctx.set_dtype(cast_back.output[0], val_dtype) + + @classmethod + def version_9(cls, ctx, node, **kwargs): + node.type = "ConstantOfShape" + # both shape and value in tensorflow are passed as tensor. 
+ # In onnx the value is an attribute so we need to fetch the value as const which + # sooner or later will be a problem for tensorflow-onnx. + # ConstantOfShape in onnxruntime only support int64, so insert cast op + input_dtype_is_int64 = utils.map_onnx_to_numpy_type(ctx.get_dtype(node.input[0])) == np.int64 + if not input_dtype_is_int64: + ctx.insert_new_node_on_input(node, "Cast", node.input[0], to=onnx_pb.TensorProto.INT64) + dtype = ctx.get_dtype(node.output[0]) + value = np.array([node.inputs[1].get_tensor_value()]).astype(utils.map_onnx_to_numpy_type(dtype)) + value_proto = numpy_helper.from_array(value) + node.set_attr("value", value_proto) + ctx.remove_input(node, node.input[1], 1) + + @classmethod + def version_11(cls, ctx, node, **kwargs): + # cls.version_7(ctx, node, **kwargs) + node.type = "Expand" + ctx.replace_inputs(node, [node.input[1], node.input[0]]) + # cast shape to int64 if needed + if ctx.get_dtype(node.input[1]) != onnx_pb.TensorProto.INT64: + ctx.insert_new_node_on_input(node, "Cast", node.input[1], to=onnx_pb.TensorProto.INT64) + + +@tf_op("Multinomial") +class Multinomial: + @classmethod + def version_7(cls, ctx, node, **kwargs): + # output_dtype output = Multinomial(T logits, int32 num_samples, @int seed, @int seed2, @type output_dtype) + sample_size = node.inputs[1].get_tensor_value() + seed = node.get_attr("seed") + if seed: + node.set_attr("seed", float(seed.i)) + output_dtype = node.get_attr("output_dtype") + if output_dtype: + output_dtype = output_dtype.i + else: + output_dtype = onnx_pb.TensorProto.INT32 + node.set_attr("dtype", output_dtype) + node.set_attr("sample_size", sample_size) + ctx.remove_input(node, node.input[1], 1) + + +@tf_op("ZerosLike") +class ZerosLike: + @classmethod + def version_1(cls, ctx, node, **kwargs): + shapes = node.output_shapes + dtypes = node.output_dtypes + ctx.remove_node(node.name) + casted_input = ctx.make_node("Cast", node.input, attr={'to': onnx_pb.TensorProto.INT64}) + const_zero = 
ctx.make_const(utils.make_name("zero"), np.array(0).astype(np.int64)) + mul_node = ctx.make_node('Mul', inputs=[casted_input.output[0], const_zero.output[0]]) + ctx.make_node("Cast", inputs=[mul_node.output[0]], + attr={'to': dtypes[0]}, + name=node.name, outputs=node.output, + shapes=shapes, dtypes=dtypes) + + +@tf_op(["IteratorV2", "FIFOQueueV2"]) +class Iterator: + @classmethod + def version_8(cls, ctx, node, **kwargs): + ctx.remove_node(node.name) + + +@tf_op(["IteratorGetNext", "QueueDequeueV2"]) +class IteratorGetNext: + @classmethod + def version_8(cls, ctx, node, **kwargs): + output_names = node.output.copy() # to make sure remove_node + # does not alter the list + type_0 = ctx.get_dtype(output_names[0]) + type_1 = ctx.get_dtype(output_names[1]) + shape_0 = ctx.get_shape(output_names[0]) + shape_1 = ctx.get_shape(output_names[1]) + ctx.remove_node(node.name) + ctx.add_graph_input(output_names[0], type_0, shape_0) + ctx.add_graph_input(output_names[1], type_1, shape_1) + + +@tf_op(["QueueDequeueManyV2", "QueueDequeueUpToV2"]) +class QueueDequeueManyV2: + @classmethod + def version_8(cls, ctx, node, **kwargs): + outputs = node.output.copy() # copy to make remove_node + # does not alter the list + shapes = node.output_shapes + dtypes = node.output_dtypes + ctx.remove_node(node.name) + for i, output in enumerate(outputs): + ctx.add_graph_input(output, dtypes[i], shapes[i]) diff --git a/lib/python3.10/site-packages/tf2onnx/onnx_opset/logical.py b/lib/python3.10/site-packages/tf2onnx/onnx_opset/logical.py new file mode 100644 index 0000000000000000000000000000000000000000..400ddc99d0a7d753ec9846b65e23ed641d0fa1bd --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/onnx_opset/logical.py @@ -0,0 +1,137 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +logical +""" + +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import logging + +from onnx import TensorProto +from tf2onnx import utils +from 
tf2onnx.handler import tf_op +from tf2onnx.onnx_opset import common + + +logger = logging.getLogger(__name__) + +# pylint: disable=unused-argument,missing-docstring + +def _add_cast_to_inputs(graph, node, supported_dtypes, target_dtype): + is_support = True + for inp in node.input: + if graph.get_dtype(inp) not in supported_dtypes: + is_support = False + break + if not is_support: + for inp in node.input: + inp_cast = graph.insert_new_node_on_input(node, "Cast", inp, to=target_dtype) + graph.copy_shape(inp, inp_cast.output[0]) + graph.set_dtype(inp_cast.output[0], target_dtype) + + +def _add_cast_to_same_type_to_inputs(graph, node): + common_dtype = graph.get_dtype(node.input[0]) + + for inp in node.input[1:]: + if graph.get_dtype(inp) != common_dtype: + inp_cast = graph.insert_new_node_on_input(node, "Cast", inp, to=common_dtype) + graph.copy_shape(inp, inp_cast.output[0]) + graph.set_dtype(inp_cast.output[0], common_dtype) + + +@tf_op("LogicalNot", onnx_op="Not") +class DirectOp: + @classmethod + def version_1(cls, ctx, node, **kwargs): + pass + + +@tf_op("LogicalAnd", onnx_op="And") +@tf_op("LogicalOr", onnx_op="Or") +class BroadcastOp(common.BroadcastOp): + pass + + +@tf_op(["Equal", "NotEqual"]) +class Equal: + @classmethod + def version_1(cls, ctx, node, **kwargs): + need_not = node.type == "NotEqual" + common.BroadcastOp.version_1(ctx, node, **kwargs) + if need_not: + node.type = "Equal" + output_name = node.output[0] + not_node = ctx.insert_new_node_on_output("Not", output_name, name=utils.make_name(node.name)) + ctx.copy_shape(output_name, not_node.output[0]) + ctx.copy_dtype(output_name, not_node.output[0]) + + @classmethod + def version_7(cls, ctx, node, **kwargs): + # T2 output = Equal(T1, x, T1 y), T1 \in {bool, int32, int64} + need_not = node.type == "NotEqual" + supported_dtypes = [ + TensorProto.BOOL, + TensorProto.INT32, + TensorProto.INT64 + ] + # FIXME: casting is not the same as equal + target_dtype = TensorProto.INT32 + _add_cast_to_inputs(ctx, 
node, supported_dtypes, target_dtype) + if need_not: + node.type = "Equal" + output_name = node.output[0] + not_node = ctx.insert_new_node_on_output("Not", output_name, name=utils.make_name(node.name)) + ctx.copy_shape(output_name, not_node.output[0]) + ctx.copy_dtype(output_name, not_node.output[0]) + + @classmethod + def version_11(cls, ctx, node, **kwargs): + # starting with opset-11, equal supports all types (but both operands must be of the same type) + _add_cast_to_same_type_to_inputs(ctx, node) + need_not = node.type == "NotEqual" + if need_not: + node.type = "Equal" + output_name = node.output[0] + not_node = ctx.insert_new_node_on_output("Not", output_name, name=utils.make_name(node.name)) + ctx.copy_shape(output_name, not_node.output[0]) + ctx.copy_dtype(output_name, not_node.output[0]) + + +@tf_op(["Greater", "Less"]) +class GreaterLess: + @classmethod + def version_1(cls, ctx, node, **kwargs): + common.BroadcastOp.version_1(ctx, node, **kwargs) + + @classmethod + def version_7(cls, ctx, node, **kwargs): + # T2 output = Greater(T1 x, T1 y), T2=tensor(bool) + # T2 output = Less(T1 x, T1 y), T2=tensor(bool) + # Great/Less in opset7 only supports limited types, insert Cast if needed + supported_dtypes = [ + TensorProto.FLOAT, + TensorProto.FLOAT16, + TensorProto.DOUBLE + ] + target_dtype = TensorProto.FLOAT + _add_cast_to_inputs(ctx, node, supported_dtypes, target_dtype) + +@tf_op(["GreaterEqual", "LessEqual"]) +class GreaterLessEqual: + @classmethod + def version_7(cls, ctx, node, **kwargs): + GreaterLess.version_7(ctx, node, **kwargs) + output_name = node.output[0] + node.op.op_type = "Less" if node.op.op_type == "GreaterEqual" else "Greater" + new_node = ctx.insert_new_node_on_output("Not", output_name, name=utils.make_name(node.name)) + ctx.copy_shape(output_name, new_node.output[0]) + ctx.set_dtype(new_node.output[0], ctx.get_dtype(output_name)) + + @classmethod + def version_12(cls, ctx, node, **kwargs): + node.op.op_type = "GreaterOrEqual" if 
node.op.op_type == "GreaterEqual" else "LessOrEqual" diff --git a/lib/python3.10/site-packages/tf2onnx/onnx_opset/math.py b/lib/python3.10/site-packages/tf2onnx/onnx_opset/math.py new file mode 100644 index 0000000000000000000000000000000000000000..1e4b852624301bf1ad73f8b858e3a6aa60c3662b --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/onnx_opset/math.py @@ -0,0 +1,740 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +math +""" + +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import logging + +import numpy as np +from onnx import onnx_pb +from tf2onnx import constants, utils +from tf2onnx.handler import tf_op +from tf2onnx.onnx_opset import common + +logger = logging.getLogger(__name__) + + +# pylint: disable=unused-argument,missing-docstring + +@tf_op(["Add", "AddV2", "Div", "Mul", "Sub"]) +class BroadcastOp(common.BroadcastOp): + pass + + +@tf_op(["RealDiv", "TruncateDiv"], onnx_op="Div") +class RealDiv(common.BroadcastOp): + pass + + +@tf_op(["LeakyRelu", "Softplus", "Softsign"]) +class DirectOpSinceOpset1: + @classmethod + def version_1(cls, ctx, node, **kwargs): + pass + + +@tf_op(["Abs", "Ceil", "Elu", "Exp", "Floor", "Log", "Neg", "Relu", "Sigmoid", "Sqrt", + "Tanh", "Reciprocal"]) +class DirectOp: + @classmethod + def version_1(cls, ctx, node, **kwargs): + pass + + @classmethod + def version_6(cls, ctx, node, **kwargs): + if node.type == "Log": + # ORT doesn't implement Log on doubles + double_to_float = {onnx_pb.TensorProto.DOUBLE: onnx_pb.TensorProto.FLOAT} + node.maybe_cast_input([[onnx_pb.TensorProto.FLOAT]], double_to_float) + + +@tf_op(["Acos", "Asin", "Atan", "Cos", "Sin", "Tan"]) +class TrigOpSinceOpset7: + @classmethod + def version_7(cls, ctx, node, **kwargs): + pass + + +@tf_op(["Acosh", "Asinh", "Atanh", "Cosh", "Sinh"]) +class TrigOpSinceOpset9: + @classmethod + def version_9(cls, ctx, node, **kwargs): + pass + + +def make_min_or_max_op(ctx, op_type, inputs, 
outputs, + output_shapes=None, output_dtypes=None): + # support more dtype + supported_dtypes = [ + onnx_pb.TensorProto.FLOAT, + onnx_pb.TensorProto.FLOAT16, + onnx_pb.TensorProto.DOUBLE + ] + target_dtype = onnx_pb.TensorProto.FLOAT + need_cast = False + cast_inputs = [] + for inp in inputs: + dtype = ctx.get_dtype(inp) + utils.make_sure(dtype is not None, "dtype of {} is None".format(inp)) + if dtype not in supported_dtypes: + cast_inp = ctx.make_node("Cast", [inp], attr={"to": target_dtype}) + cast_inputs.append(cast_inp.output[0]) + need_cast = True + else: + cast_inputs.append(inp) + node = ctx.make_node(op_type, cast_inputs, shapes=output_shapes) + actual_outputs = node.output + if need_cast: + origin_dtype = ctx.get_dtype(inputs[0]) + if output_dtypes is not None: + origin_dtype = output_dtypes[0] + ctx.set_dtype(node.output[0], target_dtype) + cast_name = utils.make_name(node.name) + cast_node = ctx.insert_new_node_on_output("Cast", node.output[0], name=cast_name, to=origin_dtype) + ctx.set_dtype(cast_node.output[0], origin_dtype) + ctx.copy_shape(node.output[0], cast_node.output[0]) + actual_outputs = cast_node.output + final_node = ctx.make_node("Identity", actual_outputs, outputs=outputs, + shapes=output_shapes, dtypes=output_dtypes) + + # tensorflow minimum/maximum does support broadcast, onnx < opset 8 does not. + # handle this by doing something like: + # y = min(x1, add(x2, sub(x1, x1))), where x1, x2 are the inputs and x2 is a scalar + # this will create a tensor of zeros of the shape of x1, adds x2 to it (which broadcasts) and use that for min. 
+ shapeo = ctx.get_shape(node.output[0]) + needs_broadcast_op = [] + has_correct_shape = [] + if ctx.opset < 8: + for i, input_name in enumerate(node.input): + if ctx.get_shape(input_name) != shapeo: + needs_broadcast_op.append(i) + else: + has_correct_shape.append(input_name) + if needs_broadcast_op: + has_correct_shape = has_correct_shape[0] + for i in needs_broadcast_op: + input_node = node.inputs[i] + # get a tensor with zeros (since there is no Fill op as of opset8) + sub_node = ctx.make_node("Sub", [has_correct_shape, has_correct_shape], + op_name_scope=input_node.name) + # use add as 'broadcast' op + add_node = ctx.make_node("Add", [input_node.output[0], sub_node.output[0]], + op_name_scope=input_node.name) + ctx.replace_input(node, node.input[i], add_node.output[0], i) + return final_node + + +@tf_op("Minimum", onnx_op="Min") +@tf_op("Maximum", onnx_op="Max") +class MinMaxOp: + @classmethod + def version_1(cls, ctx, node, **kwargs): + shapes = node.output_shapes + dtypes = node.output_dtypes + ctx.remove_node(node.name) + make_min_or_max_op(ctx, node.type, node.input, node.output, shapes, dtypes) + + @classmethod + def version_12(cls, ctx, node, **kwargs): + pass # support all numeric types and broadcasting + +@tf_op("ClipByValue") +class ClipByValueOp: + # in tf-1.8 there was a ClipByValue op which in later versions was replaced by max(min(x, a), b) + # To support models generated with tf-1.8 rewrite the tf ClipByValue op to max(min(x, a), b) + @classmethod + def version_8(cls, ctx, node, **kwargs): + supported = [onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.DOUBLE] + # fetch those upfront since they are not accessible once we remove 'node' + shapes = node.output_shapes + dtypes = node.output_dtypes + input_dtype = ctx.get_dtype(node.input[0]) + name = node.name + min_node = node.input[1] + if ctx.get_dtype(min_node) not in supported: + # cast min if needed + min_node = ctx.insert_new_node_on_input(node, "Cast", min_node, 
to=onnx_pb.TensorProto.FLOAT).output[0] + max_node = node.input[2] + if ctx.get_dtype(max_node) not in supported: + # cast max if needed + max_node = ctx.insert_new_node_on_input(node, "Cast", max_node, to=onnx_pb.TensorProto.FLOAT).output[0] + ctx.remove_node(name) + new_node = ctx.make_node("Max", [node.input[0], min_node], outputs=[node.output[0]], + shapes=shapes, dtypes=dtypes) + if input_dtype not in supported: + # cast the data tensor if needed + ctx.insert_new_node_on_input(new_node, "Cast", new_node.input[0], to=onnx_pb.TensorProto.FLOAT) + + new_node = ctx.insert_new_node_on_output("Min", new_node.output[0], name=utils.make_name(name)) + new_node.input.append(max_node) + # copy shape and type + ctx.set_dtype(new_node.output[0], dtypes[0]) + ctx.set_shape(new_node.output[0], shapes[0]) + if dtypes[0] not in supported: + # cast output if needed + new_node = ctx.insert_new_node_on_output("Cast", new_node.output[0], + name=utils.make_name(name), to=dtypes[0]) + # copy shape and type + ctx.set_dtype(new_node.output[0], dtypes[0]) + ctx.set_shape(new_node.output[0], shapes[0]) + + @classmethod + def version_12(cls, ctx, node, **kwargs): + node.type = 'Clip' # clip supports all types now + +@tf_op(["LogSoftmax", "Softmax"]) +class Softmax: + @classmethod + def version_1(cls, ctx, node, **kwargs): + # T output = Softmax(T logits). The axis softmax would be performed on is always on -1. + # T output = Softmax(T input, @int axis). Default axis is 1. + logits_rank = len(ctx.get_shape(node.input[0])) + node.set_attr("axis", logits_rank - 1) + + @classmethod + def version_11(cls, ctx, node, **kwargs): + cls.version_1(ctx, node, **kwargs) + + @classmethod + def version_13(cls, ctx, node, **kwargs): + # Default axis is now -1. 
+ pass + + +@tf_op("Square") +class Square: + @classmethod + def version_1(cls, ctx, node, **kwargs): + node.type = "Mul" + node.input.append(node.input[0]) + + +@tf_op("Relu6") +class Relu6: + @classmethod + def version_1(cls, ctx, node, **kwargs): + # relu6 = min(max(features, 0), 6) + # relu6 = min(max(features, 0), 6) + node.type = "Clip" + node.set_attr("min", 0.0) + node.set_attr("max", 6.0) + + @classmethod + def version_11(cls, ctx, node, **kwargs): + # add min and max as inputs + node.type = "Clip" + onnx_dtype = ctx.get_dtype(node.input[0]) + np_dtype = utils.ONNX_TO_NUMPY_DTYPE[onnx_dtype] + clip_min = ctx.make_const(utils.make_name("{}_min".format(node.name)), np.array(0.0, dtype=np_dtype)) + clip_max = ctx.make_const(utils.make_name("{}_max".format(node.name)), np.array(6.0, dtype=np_dtype)) + node.input.append(clip_min.output[0]) + node.input.append(clip_max.output[0]) + + +@tf_op("Rsqrt") +class Rsqrt: + @classmethod + def version_1(cls, ctx, node, **kwargs): + node.type = "Sqrt" + op_name = utils.make_name(node.name) + reciprocal = ctx.insert_new_node_on_output("Reciprocal", node.output[0], name=op_name) + ctx.copy_shape(node.output[0], reciprocal.output[0]) + + +@tf_op("SquaredDifference") +class SquaredDifference: + @classmethod + def version_1(cls, ctx, node, **kwargs): + node.type = "Sub" + op_name = utils.make_name(node.name) + mul = ctx.insert_new_node_on_output("Mul", node.output[0], name=op_name) + mul.input.append(node.output[0]) + + +@tf_op("Sign") +class Sign: + @classmethod + def version_1(cls, ctx, node, **kwargs): + """Sign op.""" + # T sign = Sign(T Input) + node_dtype = ctx.get_dtype(node.output[0]) + utils.make_sure(node_dtype, "Dtype of {} is None".format(node.name)) + if node_dtype in [onnx_pb.TensorProto.COMPLEX64, onnx_pb.TensorProto.COMPLEX128]: + raise ValueError("dtype " + str(node_dtype) + " is not supported in onnx for now") + zero_name = utils.make_name("{}_zero".format(node.name)) + ctx.make_const(zero_name, np.array(0, 
dtype=np.float32)) + if node_dtype not in [onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.DOUBLE]: + cast_node_0 = ctx.make_node("Cast", [node.input[0]], {"to": onnx_pb.TensorProto.FLOAT}) + greater_node = ctx.make_node("Greater", [cast_node_0.output[0], zero_name]) + less_node = ctx.make_node("Less", [cast_node_0.output[0], zero_name]) + else: + greater_node = ctx.make_node("Greater", [node.input[0], zero_name]) + less_node = ctx.make_node("Less", [node.input[0], zero_name]) + cast_node_1 = ctx.make_node("Cast", [greater_node.output[0]], {"to": node_dtype}) + cast_node_2 = ctx.make_node("Cast", [less_node.output[0]], {"to": node_dtype}) + + shapes = node.output_shapes + dtypes = node.output_dtypes + ctx.remove_node(node.name) + ctx.make_node("Sub", [cast_node_1.output[0], cast_node_2.output[0]], outputs=[node.output[0]], + shapes=shapes, dtypes=dtypes) + + @classmethod + def version_9(cls, ctx, node, **kwargs): + node_dtype = ctx.get_dtype(node.output[0]) + utils.make_sure(node_dtype, "dtype of {} is None".format(node.name)) + if node_dtype in [onnx_pb.TensorProto.BOOL, onnx_pb.TensorProto.COMPLEX64, onnx_pb.TensorProto.COMPLEX128]: + raise ValueError("dtype " + str(node_dtype) + " is not supported in onnx for now") + + +@tf_op("Pow") +class Pow: + @classmethod + def version_1(cls, ctx, node, **kwargs): + if ctx.is_target(constants.TARGET_CAFFE2): + # workaround a bug in caffe2 pre Feb2018, pow(a, b) becomes np.exp(np.log(a) * b) + node.type = "Log" + b = node.input[1] + ctx.remove_input(node, node.input[1], 1) + op_name = utils.make_name(node.name) + mul_op = ctx.insert_new_node_on_output("Mul", node.output[0], name=op_name) + mul_op.input.append(b) + op_name = utils.make_name(node.name) + exp_op = ctx.insert_new_node_on_output("Exp", mul_op.output[0], name=op_name) + ctx.copy_shape(node.output[0], exp_op.output[0]) + BroadcastOp.version_1(ctx, mul_op, **kwargs) + + @classmethod + def version_7(cls, ctx, node, **kwargs): + pass + + 
+@tf_op("LRN") +class LRN: + @classmethod + def version_1(cls, ctx, node, **kwargs): + # ONNX: Each input value is divided by (bias+(alpha/size)*sum(xi^2 for every xi in the local region))^beta + # TF: sqr_sum[a, b, c, d] = sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) + # output = input / (bias + alpha * sqr_sum) ** beta + + # by default, depth_radius is 5 in tensorflow + size = node.get_attr_value("depth_radius", 5) * 2 + 1 + + node.set_attr("size", size) + node.set_attr("alpha", size * node.get_attr("alpha").f) + + shapes = node.output_shapes[0] + dtypes = node.output_dtypes[0] + + ctx.insert_new_node_on_input(node, "Transpose", node.input[0], perm=constants.NHWC_TO_NCHW) + ctx.update_node_shape_dtype(node, override=True) + op_name = utils.make_name(node.name) + ctx.insert_new_node_on_output("Transpose", node.output[0], perm=constants.NCHW_TO_NHWC, + name=op_name, shapes=shapes, dtypes=dtypes) + + +@tf_op(["MatMul", "BatchMatMul", "BatchMatMulV2"]) +class MatMul: + @classmethod + def version_1(cls, ctx, node, **kwargs): + # tensorflow allows transpose and conjugated. If found, insert the required transpose. + # We could use Gemm as well but tensorflow does not pass bias in matmul. 
+ node.type = "MatMul" + + attrs = ["transpose_a", "transpose_b", "adjoint_a", "adjoint_b", "adj_x", "adj_y"] + attrs_val = [node.get_attr(attr) for attr in attrs] + attrs_val = [0 if val is None else val.i for val in attrs_val] + + dtype = ctx.get_dtype(node.output[0]) + if any(attrs_val[2:]): + # conjugation operation on complex data not supported in onnx for now + # so if it's complex than raise exception + if dtype not in [onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.DOUBLE]: + raise ValueError("dtype " + dtype + " is not supported in onnx matmul for now") + + transpose_a = (attrs_val[0] + attrs_val[2] + attrs_val[4]) % 2 + transpose_b = (attrs_val[1] + attrs_val[3] + attrs_val[5]) % 2 + + if transpose_a != 0: + shape = ctx.get_shape(node.input[0]) + if shape: + perm = list(range(0, len(shape))) + tmp = perm[-1] + perm[-1] = perm[-2] + perm[-2] = tmp + ctx.insert_new_node_on_input(node, "Transpose", node.input[0], perm=perm) + + if transpose_b != 0: + shape = ctx.get_shape(node.input[1]) + if shape: + perm = list(range(0, len(shape))) + tmp = perm[-1] + perm[-1] = perm[-2] + perm[-2] = tmp + ctx.insert_new_node_on_input(node, "Transpose", node.input[1], perm=perm) + + unsupported = ["a_is_sparse", "b_is_sparse"] + for i in unsupported: + val = node.get_attr(i) + if val is not None and val.i != 0: + raise ValueError(node.type + " attribute " + i + " is not supported") + + +@tf_op("Erf") +class Erf: + @classmethod + def version_1(cls, ctx, node, **kwargs): + """Error function.""" + # constant names + a1 = "erf_a1" + a2 = "erf_a2" + a3 = "erf_a3" + a4 = "erf_a4" + a5 = "erf_a5" + p = "erf_p" + one = "erf_one" + null = "erf_null" + + n = node.name + output_name = node.output[0] + erf_a1_node = ctx.get_node_by_output("erf_a1") + if erf_a1_node is None: + # insert the constants for erf once + ctx.make_const(a1, np.array(0.254829592, dtype=np.float32)) + ctx.make_const(a2, np.array(-0.284496736, dtype=np.float32)) + ctx.make_const(a3, 
np.array(1.421413741, dtype=np.float32)) + ctx.make_const(a4, np.array(-1.453152027, dtype=np.float32)) + ctx.make_const(a5, np.array(1.061405429, dtype=np.float32)) + ctx.make_const(p, np.array(0.3275911, dtype=np.float32)) + ctx.make_const(one, np.array(1., dtype=np.float32)) + ctx.make_const(null, np.array(0., dtype=np.float32)) + + x = node.input[0] + + # erf(x): + # sign = 1 if x >= 0 else -1 + # x = abs(x) + # # A&S formula 7.1.26 + # t = 1.0 / (1.0 + p * x) + # y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * math.exp(-x * x) + # return sign * y # erf(-x) = -erf(x) + + x_node = ctx.make_node("Abs", [x], op_name_scope=node.name, name="x") + negx_node = ctx.make_node("Sub", [null, x], op_name_scope=node.name, name="negx") + is_positive_node = ctx.make_node("Greater", [x, null], op_name_scope=node.name, name="isPositive") + is_positive_value_node = ctx.make_node("Cast", is_positive_node.output, op_name_scope=node.name, + name="isPositiveValue", attr={"to": onnx_pb.TensorProto.FLOAT}) + is_neg_node = ctx.make_node("Less", [x, null], op_name_scope=node.name, name="isNeg") + ig_neg_value_node = ctx.make_node("Cast", is_neg_node.output, op_name_scope=node.name, name="isNegValue", + attr={"to": onnx_pb.TensorProto.FLOAT}) + sign0_node = ctx.make_node("Sub", [is_positive_value_node.output[0], ig_neg_value_node.output[0]], + op_name_scope=node.name, name="sign0") + sign_add_one_node = ctx.make_node("Add", [sign0_node.output[0], one], op_name_scope=node.name, + name="signAddOne") + non_zero_node = ctx.make_node("Abs", sign0_node.output, op_name_scope=node.name, name="nonZero") + sign_node = ctx.make_node("Sub", [sign_add_one_node.output[0], non_zero_node.output[0]], + op_name_scope=node.name, name="sign") + num_4_node = ctx.make_node("Mul", [x_node.output[0], p], op_name_scope=node.name, name="4") + num_5_node = ctx.make_node("Add", [num_4_node.output[0], one], op_name_scope=node.name, name="5") + t_node = ctx.make_node("Div", [one, 
num_5_node.output[0]], op_name_scope=node.name, name="t") + xsq_node = ctx.make_node("Mul", [x, negx_node.output[0]], op_name_scope=node.name, name="xsq") + num_6_node = ctx.make_node("Exp", xsq_node.output, op_name_scope=node.name, name="6") + num_7_node = ctx.make_node("Mul", [num_6_node.output[0], t_node.output[0]], op_name_scope=node.name, name="7") + num_8_node = ctx.make_node("Mul", [t_node.output[0], a5], op_name_scope=node.name, name="8") + num_9_node = ctx.make_node("Add", [num_8_node.output[0], a4], op_name_scope=node.name, name="9") + num_10_node = ctx.make_node("Mul", [num_9_node.output[0], t_node.output[0]], op_name_scope=node.name, name="10") + num_11_node = ctx.make_node("Add", [num_10_node.output[0], a3], op_name_scope=node.name, name="11") + num_12_node = ctx.make_node("Mul", [num_11_node.output[0], t_node.output[0]], op_name_scope=node.name, + name="12") + num_13_node = ctx.make_node("Add", [num_12_node.output[0], a2], op_name_scope=node.name, name="13") + num_14_node = ctx.make_node("Mul", [num_13_node.output[0], t_node.output[0]], op_name_scope=node.name, + name="14") + num_15_node = ctx.make_node("Add", [num_14_node.output[0], a1], op_name_scope=node.name, name="15") + num_16_node = ctx.make_node("Mul", [num_15_node.output[0], num_7_node.output[0]], op_name_scope=node.name, + name="16") + num_17_node = ctx.make_node("Sub", [one, num_16_node.output[0]], op_name_scope=node.name, name="17") + + shapes = node.output_shapes + dtypes = node.output_dtypes + ctx.remove_node(node.name) + ctx.make_node("Mul", [num_17_node.output[0], sign_node.output[0]], outputs=[output_name], name=n, + shapes=shapes, dtypes=dtypes) + + @classmethod + def version_9(cls, ctx, node, **kwargs): + pass + + +@tf_op("FloorDiv") +class FloorDiv: + @classmethod + def version_6(cls, ctx, node, **kwargs): + # T output = FloorDiv(T x, T y) + node.type = "Div" + dtype = ctx.get_dtype(node.input[0]) + if dtype in [onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.FLOAT16, 
onnx_pb.TensorProto.DOUBLE]: + new_node_name = utils.make_name("floor_div_res") + floor_res = ctx.insert_new_node_on_output(op_type="Floor", output_name=node.output[0], + name=new_node_name) + ctx.copy_dtype(node.output[0], floor_res.output[0]) + ctx.copy_shape(node.output[0], floor_res.output[0]) + + +@tf_op("FloorMod") +class FloorMod: + @classmethod + def version_7(cls, ctx, node, **kwargs): + # T output = FloorMod(T x, T y) + div = ctx.make_node(op_type="Div", inputs=node.input) + dtype = ctx.get_dtype(node.input[0]) + if dtype in [onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.DOUBLE]: + div = ctx.make_node(op_type="Floor", inputs=div.output) + + mul = ctx.make_node(op_type="Mul", inputs=[div.output[0], node.input[1]]) + # res node will take over shape&dtype&output connection info of original "node" + shapes = node.output_shapes + dtypes = node.output_dtypes + ctx.remove_node(node.name) + ctx.make_node(op_type="Sub", inputs=[node.input[0], mul.output[0]], + name=node.name, outputs=node.output, shapes=shapes, dtypes=dtypes) + + +@tf_op("Selu") +class Selu: + @classmethod + def version_1(cls, ctx, node, **kwargs): + pass + + +@tf_op("Cumsum", onnx_op="CumSum") +class CumSum: + @classmethod + def version_11(cls, ctx, node, **kwargs): + pass + + +@tf_op("Round") +class Round: + @classmethod + def version_11(cls, ctx, node, **kwargs): + pass + + +@tf_op("MatrixDeterminant", onnx_op="Det") +class Det: + @classmethod + def version_11(cls, ctx, node, **kwargs): + pass + + +@tf_op(["LeftShift", "RightShift"]) +class BitShift: + + @classmethod + def version_11(cls, ctx, node, **kwargs): + dir_map = {"LeftShift": "LEFT", "RightShift": "RIGHT"} + direction = dir_map[node.type] + supported = [onnx_pb.TensorProto.UINT8, onnx_pb.TensorProto.UINT16, + onnx_pb.TensorProto.UINT32, onnx_pb.TensorProto.UINT64] + type_map = {onnx_pb.TensorProto.INT8: onnx_pb.TensorProto.UINT8, + onnx_pb.TensorProto.INT16: onnx_pb.TensorProto.UINT32, + 
onnx_pb.TensorProto.INT32: onnx_pb.TensorProto.UINT64} + shapes = node.output_shapes + dtypes = node.output_dtypes + ctx.remove_node(node.name) + + node = ctx.make_node("BitShift", inputs=node.input, outputs=node.output, name=node.name, + shapes=shapes, dtypes=dtypes, domain=constants.ONNX_DOMAIN, attr={'direction': direction}) + + if node.maybe_cast_input([supported, supported], type_map): + cast_back_node = ctx.insert_new_node_on_output( + "Cast", node.output[0], name=utils.make_name(node.name) + "_castback", + to=dtypes[0]) + ctx.set_dtype(cast_back_node.output[0], dtypes[0]) + ctx.copy_shape(node.name, cast_back_node.output[0]) + + +@tf_op("SquaredDistance", onnx_op="MeanSquaredDistance") +class SquaredDistance: + @classmethod + def version_12(cls, ctx, node, **kwargs): + node.attr["reduction"] = "none" + + +@tf_op("Einsum") +class Einsum: + @classmethod + def version_12(cls, ctx, node, **kwargs): + del node.attr["N"] + node.attr["equation"].s = node.attr["equation"].s.lower() + + +@tf_op("IsFinite") +class IsFinite: + @classmethod + def version_10(cls, ctx, node, **kwargs): + # map to onnx as: + # not (isinf(x) or isnan(x)) + + shapes = node.output_shapes + dtypes = [onnx_pb.TensorProto.BOOL] * len(node.output_dtypes) + outputs = node.output + + ctx.remove_node(node.name) + + inf_node = ctx.make_node("IsInf", inputs=node.input, name=utils.make_name(node.name), + shapes=shapes, dtypes=dtypes) + nan_node = ctx.make_node("IsNaN", inputs=node.input, name=utils.make_name(node.name), + shapes=shapes, dtypes=dtypes) + or_node = ctx.make_node("Or", inputs=[inf_node.output[0], nan_node.output[0]], name=utils.make_name(node.name), + shapes=shapes, dtypes=dtypes) + _ = ctx.make_node("Not", inputs=or_node.output, name=node.name, outputs=outputs, + shapes=shapes, dtypes=dtypes) + + +@tf_op("Atan2") +class Atan2Op: + # support more dtype + + @classmethod + def version_9(cls, ctx, node, **kwargs): + """ + Obtained with a linear regression. 
+ + :: + + def atan2(y, x): + sx = numpy.sign(x) + sy = numpy.sign(y) + pi_part = (sy + sx * (sy ** 2 - 1)) * (sx - 1) * (-numpy.pi/2) + atan_part = numpy.arctan(y / (x + (1 - sx ** 2))) * sx ** 2 + return atan_part + pi_part + """ + supported_dtypes = [ + onnx_pb.TensorProto.FLOAT, + onnx_pb.TensorProto.FLOAT16, + onnx_pb.TensorProto.DOUBLE + ] + + onnx_dtype = ctx.get_dtype(node.input[0]) + utils.make_sure(onnx_dtype in supported_dtypes, "Unsupported input type.") + shape = ctx.get_shape(node.input[0]) + np_dtype = utils.map_onnx_to_numpy_type(onnx_dtype) + + # sign part + + sign_x_node = ctx.make_node( + "Sign", inputs=node.input[1:], + name=utils.make_name(node.name + 'signx')) + sign_y_node = ctx.make_node( + "Sign", inputs=node.input[:1], + name=utils.make_name(node.name + 'signy')) + + sx_node = ctx.make_node( + "Cast", sign_x_node.output[:1], attr={"to": onnx_dtype}, + name=utils.make_name(node.name + 'csignx')) + sy_node = ctx.make_node( + "Cast", sign_y_node.output[:1], attr={"to": onnx_dtype}, + name=utils.make_name(node.name + 'csigny')) + + # cst + + one_node = ctx.make_const( + utils.make_name("{}_one".format(node.name)), + np.array([1], dtype=np_dtype)) + + pib2_node = ctx.make_const( + utils.make_name("{}_pi".format(node.name)), + np.array(- np.pi / 2, dtype=np_dtype)) + + # pi_part = (sy + sx * (sy ** 2 - 1)) * (sx - 1) * (-numpy.pi/2) + + sxm1_node = ctx.make_node( + "Sub", [sx_node.output[0], one_node.output[0]], + name=utils.make_name(node.name + 'sxm1')) + sy2_node = ctx.make_node( + "Mul", [sy_node.output[0], sy_node.output[0]], + name=utils.make_name(node.name + 'sy2')) + sy2m1_node = ctx.make_node( + "Sub", [sy2_node.output[0], one_node.output[0]], + name=utils.make_name(node.name + 'sy2m1')) + sxsy2m1_node = ctx.make_node( + "Mul", [sx_node.output[0], sy2m1_node.output[0]], + name=utils.make_name(node.name + 'sxsy2m1')) + sysxsy2m1_node = ctx.make_node( + "Add", [sy_node.output[0], sxsy2m1_node.output[0]], + name=utils.make_name(node.name + 
'sysxsy2m1')) + m1_node = ctx.make_node( + "Mul", [sysxsy2m1_node.output[0], sxm1_node.output[0]], + name=utils.make_name(node.name + 'm1')) + pi_part = ctx.make_node( + "Mul", [m1_node.output[0], pib2_node.output[0]], + name=utils.make_name(node.name + 'pip')) + + # atan + + sx2_node = ctx.make_node( + "Mul", [sx_node.output[0], sx_node.output[0]], + name=utils.make_name(node.name + 'sx2')) + sx2m1_node = ctx.make_node( + "Sub", [sx2_node.output[0], one_node.output[0]], + name=utils.make_name(node.name + 'sx2m1')) + xsx2m1_node = ctx.make_node( + "Add", [node.input[1], sx2m1_node.output[0]], + name=utils.make_name(node.name + 'xsx2m1')) + div_node = ctx.make_node( + "Div", inputs=[node.input[0], xsx2m1_node.output[0]], + name=utils.make_name(node.name + 'div')) + atan0_node = ctx.make_node( + "Atan", inputs=[div_node.output[0]], + name=utils.make_name(node.name + 'atan0')) + atan_node = ctx.make_node( + "Mul", inputs=[sx2_node.output[0], atan0_node.output[0]], + name=utils.make_name(node.name + 'atan')) + + # final + + ctx.remove_node(node.name) + + last_node = ctx.make_node( + "Add", inputs=[atan_node.output[0], pi_part.output[0]], + op_name_scope=node.name + 'all', + shapes=[shape], dtypes=[onnx_dtype]) + ctx.replace_all_inputs(node.output[0], last_node.output[0]) # ops=ctx.get_nodes() + + +@tf_op("InvertPermutation") +class InvertPermutationOp: + + @classmethod + def version_11(cls, ctx, node, **kwargs): + + supported_dtypes = [onnx_pb.TensorProto.INT32, onnx_pb.TensorProto.INT64] + onnx_dtype = ctx.get_dtype(node.input[0]) + utils.make_sure(onnx_dtype in supported_dtypes, "InvertPermutation only applies on INT32, INT64.") + + shape = ctx.get_shape(node.input[0]) + + shape_node = ctx.make_node( + "Shape", inputs=node.input, name=utils.make_name(node.name + '_shape')) + + neg_node = ctx.make_node( + "Neg", inputs=node.input, name=utils.make_name(node.name + '_neg')) + + topk_node = ctx.make_node( + "TopK", inputs=[neg_node.output[0], shape_node.output[0]], + 
name=utils.make_name(node.name + '_topk'), output_count=2) + + ctx.remove_node(node.name) + + last_node = ctx.make_node( + "Identity", inputs=topk_node.output[1:], name=utils.make_name(node.name + '_indices'), + shapes=[shape], dtypes=[onnx_dtype]) + + ctx.replace_all_inputs(node.output[0], last_node.output[0]) # ops=ctx.get_nodes() diff --git a/lib/python3.10/site-packages/tf2onnx/onnx_opset/misc.py b/lib/python3.10/site-packages/tf2onnx/onnx_opset/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..4935c6b444504d0b23e8c88b6ff89ec6ccb467c6 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/onnx_opset/misc.py @@ -0,0 +1,48 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +misc +""" + +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import logging + +from tf2onnx.handler import tf_op + + +logger = logging.getLogger(__name__) + +# pylint: disable=unused-argument,missing-docstring + +@tf_op(["CheckNumerics", "StopGradient"]) +class MoveToIdent: + @classmethod + def version_1(cls, ctx, node, **kwargs): + node.type = "Identity" + if node.inputs[0].is_const(): + # should not remove the identity node if it is output of the graph + if node.output[0] in ctx.outputs: + return + # if identity has a const as input, remove it + input_name = node.input[0] + output_name = node.output[0] + ctx.replace_all_inputs(output_name, input_name) # ops=ctx.get_nodes() + ctx.remove_node(node.name) + + +@tf_op(["Placeholder", "PlaceholderV2", "PlaceholderWithDefault"]) +class DirectOp: + @classmethod + def version_1(cls, ctx, node, **kwargs): + pass + + +@tf_op("NoOp") +class NukeNode: + @classmethod + def version_1(cls, ctx, node, **kwargs): + ctx.remove_node(node.name) diff --git a/lib/python3.10/site-packages/tf2onnx/onnx_opset/nn.py b/lib/python3.10/site-packages/tf2onnx/onnx_opset/nn.py new file mode 100644 index 
0000000000000000000000000000000000000000..3d37ef489d6635241bb20eb82bf30c37eb0a94ea --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/onnx_opset/nn.py @@ -0,0 +1,1534 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +nn +""" + +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import logging + +import numpy as np +from onnx import onnx_pb, helper +from onnx.onnx_pb import TensorProto +from tf2onnx import constants, utils +from tf2onnx.graph_builder import GraphBuilder +from tf2onnx.handler import tf_op +from tf2onnx.onnx_opset import common, controlflow, tensor + +logger = logging.getLogger(__name__) + + +# pylint: disable=unused-argument,missing-docstring,unused-variable + +def spatial_map(shape, perm): + new_shape = shape[:] + for i in perm: + new_shape[i] = shape[perm[i]] + return new_shape + + +def is_channels_last(node): + """Returns whether node is channels last, so (N, ..., C).""" + + return not node.data_format.startswith("NC") + + +def make_shape_channels_first(shape): + """Makes a (N, ..., C) shape into (N, C, ...).""" + + return shape[:1] + shape[-1:] + shape[1:-1] + + +def make_shape_channels_last(shape): + """Makes a (N, C, ...) shape into (N, ..., C).""" + + return shape[:1] + shape[1:-1] + shape[1:2] + + +def get_channels_first_permutation(spatial): + """Returns a permutation to make a (N, ..., C) array into (N, C, ...).""" + + return [0, spatial + 1] + list(range(1, spatial + 1)) + + +def get_channels_last_permutation(spatial): + """Returns a permutation to make a (N, C, ...) array into (N, ..., C).""" + + return [0] + list(range(2, spatial + 2)) + [1] + + +def conv_convert_inputs(ctx, node, with_kernel=False, new_kernel_shape=None, + input_indices=None, output_indices=None, spatial=2): + """Convert input and kernel from tensorflow to onnx. This maybe require to + to insert transpose ops for input, kernel and output unless they are constants + and we can transpose the constant. 
    We transpose inputs if they are in NHWC. We always transpose the kernel from
    HWNC to NCHW. Outputs are transposed if the format is NHWC.
    Some convolutions like depthwise_conv2d require a reshape of the kernel.

    Args:
        ctx: The parent graph.
        node: Node of the convolution op.
        with_kernel: Transpose the kernel.
        new_kernel_shape: Pass to reshape the kernel.
        input_indices: Indices that define the inputs.
        output_indices: Indices that define the outputs.
    """

    if input_indices is None:
        input_indices = [0]
    if output_indices is None:
        output_indices = [0]

    # Transpose inputs if needed.
    if is_channels_last(node):
        # Get channels first permutation.
        permutation = get_channels_first_permutation(spatial)

        # Transpose input if needed, no need to record shapes on input
        for idx in input_indices:
            # If input is a constant, transpose that one if we are the only consumer.
            input_node = node.inputs[idx]
            input_name = node.input[idx]

            if input_node.is_const() and len(ctx.find_output_consumers(input_name)) == 1:
                # Transpose constant to make it channels first.
                val = input_node.get_tensor_value(as_list=False)
                val = np.transpose(val, permutation)

                input_node.set_tensor_value(val)
            else:
                # Insert transpose op.
                transpose = ctx.insert_new_node_on_input(node, "Transpose", input_name)
                transpose.set_attr("perm", permutation)
                transpose.skip_conversion = True

                shape = ctx.get_shape(input_name)
                if shape is not None:
                    new_shape = make_shape_channels_first(shape)

                    ctx.set_shape(transpose.output[0], new_shape)

    # Transpose kernel if needed.
    if with_kernel:
        # Some ONNX convolution ops require to reshape the kernel (ie. depthwise_conv2d).
        if new_kernel_shape:
            kernel_name = node.input[1]
            if ctx.opset < 5:
                # Old reshape takes new shape as attribute.
                reshape = ctx.insert_new_node_on_input(node, "Reshape", kernel_name)
                reshape.set_attr("shape", new_kernel_shape)
                reshape.skip_conversion = True
            else:
                # New reshape takes new shape as input[1].
                shape_name = utils.make_name(node.name)
                ctx.make_const(shape_name, np.array(new_kernel_shape, dtype=np.int64))

                reshape = ctx.make_node("Reshape", [kernel_name, shape_name])
                ctx.replace_input(node, kernel_name, reshape.output[0], 1)

                reshape.skip_conversion = True
            ctx.set_shape(reshape.output[0], new_kernel_shape)

        # Get kernel (may have be changed to a reshape above).
        kernel_node = node.inputs[1]
        kernel_name = node.input[1]

        # Transpose kernel from (..., C_in, C_out) to (C_out, C_in, ...)
        permutation = [spatial + 1, spatial] + list(range(spatial))

        # If kernel is a constant, transpose that one if we are the only consumer.
        need_transpose = True
        if kernel_node.is_const() and len(ctx.find_output_consumers(kernel_name)) == 1:
            val = kernel_node.get_tensor_value(as_list=False)
            val = np.transpose(val, permutation)

            kernel_node.set_tensor_value(val)
            need_transpose = False

        if need_transpose:
            transpose = ctx.insert_new_node_on_input(node, "Transpose", kernel_name)
            transpose.set_attr("perm", permutation)
            transpose.skip_conversion = True

            new_shape = spatial_map(ctx.get_shape(kernel_name), permutation)
            ctx.set_shape(transpose.output[0], new_shape)

    # Transpose outputs back if needed.
    if is_channels_last(node):
        for idx in output_indices:
            # Make output channels last again by transposing.
            output_name = node.output[idx]
            output_shape = ctx.get_shape(node.output[idx])

            permutation = get_channels_last_permutation(spatial)

            op_name = utils.make_name(node.name)
            transpose = ctx.insert_new_node_on_output("Transpose", output_name, name=op_name)

            transpose.set_attr("perm", permutation)
            transpose.skip_conversion = True

            # Set tensorflow channels last shape as the transpose node shape.
            ctx.set_shape(transpose.output[0], output_shape)

            # Make the current ONNX convolution output shape channels first.
            ctx.set_shape(output_name, make_shape_channels_first(output_shape))

    # NOTE: Not strictly correct as it can also be NCW or NCDHW for example.
    # NOTE: Generally speaking it's channels first.
    node.data_format = "NCHW"


def add_padding(ctx, node, kernel_shape, strides, dilations=None, spatial=2):
    """Translate TF 'SAME'/'VALID' padding into the ONNX 'pads' attribute.

    For 'SAME' with fully known shapes the explicit begin/end pads are
    computed per spatial dim; with unknown dims it falls back to the
    'auto_pad'='SAME_UPPER' attribute. 'VALID' needs no pads at all.
    """
    padding = node.get_attr("padding")
    if not padding:
        return

    if dilations is None:
        dilations = [1] * spatial

    padding = padding.s.decode("utf-8")
    if padding == "SAME":
        # Initialize with all zeros.
        # Paddings are in (x_begin, y_begin, ..., x_end, y_end, ...) order.
        pads = [0] * (spatial * 2)

        # Get shapes and check whether valid.
        input_shape = ctx.get_shape(node.input[0])
        output_shape = ctx.get_shape(node.output[0])

        # NOTE(review): this first check validates the *input* rank but the
        # message says "output" — looks like a copy/paste in the message text.
        if len(input_shape) != spatial + 2:
            raise ValueError(
                "node {} output needs to be rank {}, is {}".format(
                    node.name, spatial + 2, len(input_shape)
                )
            )

        if len(output_shape) != spatial + 2:
            raise ValueError(
                "node {} output needs to be rank {}, is {}".format(
                    node.name, spatial + 2, len(output_shape)
                )
            )

        # Transpose to channels first if not so.
        if is_channels_last(node):
            input_shape = make_shape_channels_first(input_shape)
            output_shape = make_shape_channels_first(output_shape)

        # Check for unknown input/output dimensions. Fall back to auto padding if so.
        if any(input_shape[i + 2] == -1 or output_shape[i + 2] == -1 for i in range(spatial)):
            logger.debug(
                "node %s has unknown dim for pads calculation, fallback to auto_pad: "
                "input_shape=%s, output_shape=%s",
                node.name,
                input_shape,
                output_shape,
            )

            node.set_attr("auto_pad", "SAME_UPPER")
            return

        # Calculate paddings.
        for i in range(spatial):
            # Total padding needed so the strided/dilated kernel produces
            # exactly output_shape[i + 2] positions over input_shape[i + 2].
            pad = (
                (output_shape[i + 2] - 1) * strides[i]
                + dilations[i] * (kernel_shape[i] - 1) + 1
                - input_shape[i + 2]
            )
            pad = max(pad, 0)

            # Split between begin/end; the extra element (odd pad) goes to the end,
            # matching TF 'SAME' semantics.
            pads[i] = pad // 2
            pads[i + spatial] = pad - pad // 2

        node.set_attr("pads", pads)
    elif padding == "VALID":
        pass
    else:
        raise ValueError("invalid padding value: {}".format(padding))

def parse_dims_attr(node, dims, spatial):
    """Return only the spatial entries of a TF dims list (strides/ksize/...).

    Channels-last lists are (N, ..., C) — strip first and last; channels-first
    lists are (N, C, ...) — strip the first two. A list that already has
    exactly `spatial` entries is returned unchanged (channels-last case).
    """
    if is_channels_last(node):
        # We have (N, ..., C) or (...).
        if len(dims) != spatial:
            dims = dims[1:-1]
    else:
        # We have (N, C, ...).
        dims = dims[2:]
    return dims

def conv_dims_attr(node, name, new_name=None, spatial=2):
    """Fetch attribute `name`, keep only its spatial part, store it back
    (under `new_name` if given) and return it; None if the attr is absent."""
    # Fetch attribute.
    if new_name is None:
        new_name = name

    dims = node.get_attr(name)
    if not dims:
        return None

    # Get spatial part.
    dims = dims.ints
    dims = parse_dims_attr(node, dims, spatial)

    # Set new value and return it.
    node.set_attr(new_name, dims)

    return dims


def conv_kernel_shape(ctx, node, input_idx, spatial=2):
    """Set node's 'kernel_shape' attr from the spatial dims of the kernel
    input at `input_idx` and return it. TF kernels are (..., C_in, C_out)."""
    # Kernel shape is (..., C_in, C_out).
    kernel_shape = ctx.get_shape(node.input[input_idx])
    if len(kernel_shape) != spatial + 2:
        raise ValueError("kernel rank must be spatial+2")

    # Get spatial part.
    kernel_shape = kernel_shape[:spatial]

    # Set new value and return it.
    node.set_attr("kernel_shape", kernel_shape)

    return kernel_shape


def build_dynamic_target_size(ctx, transposed_intput, target_hw):
    """
    Build the target tensor shape for the Resize op.

    Args:
    - ctx: the graph context
    - transposed_intput: A tensor of rank 4 of shape [n c h w]
    - target_hw: tensor of rank 2 containing the target size for a resize: [nh nw]

    Returns:
        A tensor of rank 2 containing [n c nh nw]
    """
    # We get the first half [n c] of the target shape
    shape_of_transposed_input = ctx.make_node("Shape", [transposed_intput])
    first_half_of_shape = GraphBuilder(ctx).make_slice(
        {"data": shape_of_transposed_input.output[0], "ends": [2], "starts": [0]})
    target_size_int64 = ctx.make_node("Cast", [target_hw], attr={'to': TensorProto.INT64})
    # We build a tensor containing [n c nh nw]
    final_target_size = ctx.make_node("Concat", [first_half_of_shape, target_size_int64.output[0]], {'axis': 0})
    return final_target_size


@tf_op(["Conv1D", "Conv2D", "Conv3D"])
class ConvOp:
    @classmethod
    def any_version(cls, opset, ctx, node, **kwargs):
        # ONNX specification:
        #
        # T output = Conv2D(T input, T filter, @list(int) strides, @bool use_cudnn_on_gpu,
        # @string padding, @string data_format)
        #
        # T Y = Conv(T X, T W, T B, @AttrType.STRING auto_pad, @AttrType.INTS dilations, @AttrType.INT group,
        # @AttrType.INTS kernel_shape, @AttrType.INTS pads, @AttrType.INTS strides)
        #

        # Determine number of spatial dimensions.
        # Works because the op name ends in "1D"/"2D"/"3D", so type[-2] is the digit.
        spatial = int(node.type[-2])

        # Make it a convolution node.
        node.type = "Conv"

        # Determine kernel spatial shape, strides and dilations.
        kernel_shape = conv_kernel_shape(ctx, node, 1, spatial=spatial)
        strides = conv_dims_attr(node, "strides", spatial=spatial)
        dilations = conv_dims_attr(node, "dilations", spatial=spatial)

        # prefix with batch dim of [1] to satisfy rank requirements
        input_shape = ctx.get_shape(node.input[0])
        if len(input_shape) == spatial + 1:
            gb = GraphBuilder(ctx)
            usq_node = gb.make_unsqueeze({"axes": [0], 'data': node.input[0]}, return_node=True)
            ctx.replace_inputs(node, [usq_node.output[0]] + node.input[1:])

        # Set padding.
        add_padding(
            ctx, node, kernel_shape, strides, dilations=dilations, spatial=spatial
        )

        # Convert input and filters.
        conv_convert_inputs(ctx, node, with_kernel=True, spatial=spatial)

    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        cls.any_version(1, ctx, node, **kwargs)

    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # No change.
        cls.any_version(11, ctx, node, **kwargs)

    @classmethod
    def version_13(cls, ctx, node, **kwargs):
        # Signature change for operator Unsqueeze.
        cls.any_version(13, ctx, node, **kwargs)


def get_shape_from_const_or_concat(ctx, node):
    """Best-effort static shape extraction from a Const node or a Concat of
    unsqueezed scalar Consts (as produced when a TF Pack is converted).
    Returns a python list (batch dim may be -1) or None when unknown."""
    if node.is_const():
        return node.get_tensor_value()
    if node.type == 'Concat':
        # Sometimes the shape is formed by concating a bunch of consts together
        res = []
        if any(ctx.get_shape(inp) != [1] for inp in node.input):
            return None
        for i, inp in enumerate(node.inputs):
            # The concat is converted from a Pack. Conversion adds an unsqueeze to the inputs.
            if node.inputs[i].type == 'Unsqueeze' and node.inputs[i].inputs[0].is_scalar():
                res.append(node.inputs[i].inputs[0].get_tensor_value())
            else:
                if i == 0:
                    # For the batch dimension we don't care if it is unknown
                    res.append(-1)
                else:
                    return None
        return res
    return None

@tf_op(["Conv2DBackpropInput", "Conv3DBackpropInputV2"])
class ConvTranspose:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # T output = Conv2DBackpropInput(int32 input_sizes, T filter, T out_backprop,
        # @list(int) strides, @bool use_cudnn_on_gpu, @string padding, @string data_format, @list(int) dilations)
        # T Y = ConvTranspose(T X, T W, T B, @STRING auto_pad, @INTS dilations,
        # @INT group, @INTS kernel_shape, @INTS output_shape, @INTS pads, @INTS strides)

        if node.type == "Conv3DBackpropInputV2":
            spatial = 3
        else:
            spatial = 2
        node.type = "ConvTranspose"
        # Note: inputs are reversed from what one would expect.
        conv_kernel_shape(ctx, node, 1, spatial=spatial)
        # TF inputs: [output_sizes, filter, out_backprop]; the data is input[2].
        input_shape = ctx.get_shape(node.input[2])
        output_shape_orig = node.output_shapes

        # ouput_shape is explicitly specified here, in this case pads values are auto generated/calculated.
        output_shape = get_shape_from_const_or_concat(ctx, node.inputs[0])
        if output_shape is not None:
            #output_shape = ctx.get_shape(node.output[0])
            if is_channels_last(node):
                new_output_shape = [output_shape[1], output_shape[2]]
                input_dims = [input_shape[1], input_shape[2]]
                if spatial == 3:
                    new_output_shape.append(output_shape[3])
                    input_dims.append(input_shape[3])
            else:
                new_output_shape = [output_shape[2], output_shape[3]]
                input_dims = [input_shape[2], input_shape[3]]
                if spatial == 3:
                    new_output_shape.append(output_shape[4])
                    input_dims.append(input_shape[4])

            utils.make_sure(new_output_shape.count(-1) <= 0, "output dims need to be known")
            utils.make_sure(all(new_output_shape[i] >= input_dims[i] for i in range(spatial)),
                            "output dims cannot be smaller than input dims.")

            node.set_attr("output_shape", new_output_shape)
        else:
            # Dynamic output shape: let ConvTranspose compute a (possibly larger)
            # output and then Slice it down to the requested size.
            utils.make_sure(ctx.opset >= 10, "Opset 10 needed for Conv Backprop Input with non-constant shape")
            strides = parse_dims_attr(node, node.get_attr('strides').ints, spatial)
            use_strides_workaround = any(d > 1 for d in strides)
            if use_strides_workaround and ctx.opset < 12:
                # When strides > 1, ONNX and TF have an implementation difference in ConvTranspose. ONNX outputs a
                # slightly smaller tensor which must be padded with a row of 0s. Pad with dynamic shape requires
                # opset >= 11 and Max of int64 needs opset >= 12. Depending on the output_shape, this row of 0s might
                # be shaved off, in which case TF and ONNX agree. When output_shape is dynamic it is impossible to
                # know at conversion time whether this is the case and the workaround is needed.
                logger.warning("Conv Backprop Input with strides > 1 and non-constant shape has known bug. "
                               "Workaround requires opset 12.")
                use_strides_workaround = False
            # Requested spatial size (from the output_sizes input) vs. actual
            # ConvTranspose output size; slice [diff//2, diff//2 + expected).
            input_shape = ctx.make_node("Cast", [node.input[0]], attr={'to': TensorProto.INT64})
            output_shape = ctx.make_node("Shape", [node.output[0]])
            output_h = GraphBuilder(ctx).make_slice(
                {"data": output_shape.output[0], "ends": [2], "starts": [1], "axes": [0]})
            output_w = GraphBuilder(ctx).make_slice(
                {"data": output_shape.output[0], "ends": [3], "starts": [2], "axes": [0]})
            expect_h = GraphBuilder(ctx).make_slice(
                {"data": input_shape.output[0], "ends": [2], "starts": [1], "axes": [0]})
            expect_w = GraphBuilder(ctx).make_slice(
                {"data": input_shape.output[0], "ends": [3], "starts": [2], "axes": [0]})
            diff_h = ctx.make_node("Sub", [output_h, expect_h])
            diff_w = ctx.make_node("Sub", [output_w, expect_w])
            nonneg_diff_h = diff_h
            nonneg_diff_w = diff_w

            if use_strides_workaround:
                const_zero = ctx.make_const(utils.make_name(node.name + "_const_zero"), np.array([0], dtype=np.int64))
                nonneg_diff_h = ctx.make_node("Max", [diff_h.output[0], const_zero.output[0]])
                nonneg_diff_w = ctx.make_node("Max", [diff_w.output[0], const_zero.output[0]])

            const_two = ctx.make_const(utils.make_name(node.name + "_const_two"), np.array([2], dtype=np.int64))
            start_h = ctx.make_node("Div", [nonneg_diff_h.output[0], const_two.output[0]])
            start_w = ctx.make_node("Div", [nonneg_diff_w.output[0], const_two.output[0]])
            end_h = ctx.make_node("Add", [start_h.output[0], expect_h])
            end_w = ctx.make_node("Add", [start_w.output[0], expect_w])
            if spatial == 3:
                output_d = GraphBuilder(ctx).make_slice(
                    {"data": output_shape.output[0], "ends": [4], "starts": [3], "axes": [0]})
                expect_d = GraphBuilder(ctx).make_slice(
                    {"data": input_shape.output[0], "ends": [4], "starts": [3], "axes": [0]})
                diff_d = ctx.make_node("Sub", [output_d, expect_d])
                nonneg_diff_d = diff_d
                if use_strides_workaround:
                    nonneg_diff_d = ctx.make_node("Max", [diff_d.output[0], const_zero.output[0]])
                start_d = ctx.make_node("Div", [nonneg_diff_d.output[0], const_two.output[0]])
                end_d = ctx.make_node("Add", [start_d.output[0], expect_d])

                starts = ctx.make_node("Concat", [start_h.output[0], start_w.output[0], start_d.output[0]],
                                       attr={"axis": 0})
                ends = ctx.make_node("Concat", [end_h.output[0], end_w.output[0], end_d.output[0]], attr={"axis": 0})
                slice_axes = ctx.make_const(utils.make_name(node.name + "_const_slice_axes"),
                                            np.array([1, 2, 3], dtype=np.int64))
            else:
                starts = ctx.make_node("Concat", [start_h.output[0], start_w.output[0]], attr={"axis": 0})
                ends = ctx.make_node("Concat", [end_h.output[0], end_w.output[0]], attr={"axis": 0})
                slice_axes = ctx.make_const(utils.make_name(node.name + "_const_slice_axes"),
                                            np.array([1, 2], dtype=np.int64))

            slice_node = ctx.make_node("Slice",
                                       [node.output[0], starts.output[0], ends.output[0], slice_axes.output[0]],
                                       shapes=output_shape_orig)

            final_node = slice_node

            if use_strides_workaround:
                # If the requested size exceeds what ConvTranspose produced
                # (negative diff), pad the sliced result with trailing zeros.
                cz = const_zero.output[0]

                neg_diff_h = ctx.make_node("Neg", [diff_h.output[0]])
                shrink_h_by = ctx.make_node("Max", [neg_diff_h.output[0], const_zero.output[0]])
                shb = shrink_h_by.output[0]

                neg_diff_w = ctx.make_node("Neg", [diff_w.output[0]])
                shrink_w_by = ctx.make_node("Max", [neg_diff_w.output[0], const_zero.output[0]])
                swb = shrink_w_by.output[0]

                if spatial == 3:
                    neg_diff_d = ctx.make_node("Neg", [diff_d.output[0]])
                    shrink_d_by = ctx.make_node("Max", [neg_diff_d.output[0], const_zero.output[0]])
                    sdb = shrink_d_by.output[0]
                    # Pads are per-dim begins then per-dim ends; data is still NHWC here.
                    pads = ctx.make_node("Concat", [cz, cz, cz, cz, cz, cz, shb, swb, sdb, cz], attr={"axis": 0})
                    padded_node = ctx.make_node("Pad", [slice_node.output[0], pads.output[0]])
                else:
                    pads = ctx.make_node("Concat", [cz, cz, cz, cz, cz, shb, swb, cz], attr={"axis": 0})
                    padded_node = ctx.make_node("Pad", [slice_node.output[0], pads.output[0]])

                final_node = padded_node

            # Rewire consumers of the raw ConvTranspose output to the
            # sliced/padded result, excluding the helper nodes themselves.
            downstream_nodes = ctx.find_output_consumers(node.output[0])
            downstream_nodes.remove(output_shape)
            downstream_nodes.remove(slice_node)
            ctx.replace_all_inputs(node.output[0], final_node.output[0], ops=downstream_nodes)

        conv_dims_attr(node, "strides", spatial=spatial)
        conv_dims_attr(node, "dilations", spatial=spatial)

        # remove output_shapes input
        ctx.remove_input(node, node.input[0], 0)
        # swap data and kernel
        t = node.input[0]
        ctx.replace_input(node, node.input[0], node.input[1], 0)
        ctx.replace_input(node, node.input[1], t, 1)

        conv_convert_inputs(ctx, node, with_kernel=True, spatial=spatial)

    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        cls.version_1(ctx, node, **kwargs)


@tf_op(["DepthwiseConv2d", "DepthwiseConv2dNative"])
class DepthwiseConv2d:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # T output = DepthwiseConv2dNative(T input, T filter, @list(int) strides, @string padding, @string data_format)
        # T Y = ConvTranspose(T X, T W, T B, @AttrType.STRING auto_pad, @AttrType.INTS dilations, @AttrType.INT group,
        # @AttrType.INTS kernel_shape, @AttrType.INTS output_shape, @AttrType.INTS pads, @AttrType.INTS strides)
        #
        # this is not documented well in onnx, the hint comes from pytorch documentation:
        # http://pytorch.org/docs/master/nn.html#torch.nn.Conv2d
        # The configuration when groups == in_channels and out_channels = K * in_channels
        # where K is a positive integer is termed in literature as depthwise convolution.
        # In other words, for an input of size (N,Cin,Hin,Win),
        # if you want a depthwise convolution with a depthwise multiplier K,
        # then you use the constructor arguments (in_channels=Cin,out_channels=Cin*K,...,groups=Cin)
        #
        node.type = "Conv"
        input_shape = ctx.get_shape(node.input[0])
        if len(input_shape) != 4:
            raise ValueError("only Conv2D is supported")

        # TF depthwise kernel layout: (H, W, C_in, channel_multiplier).
        kernel_shape = ctx.get_shape(node.input[1])
        if len(kernel_shape) != 4:
            raise ValueError("only Conv2D is supported")
        k_h, k_w, k_input_channels, k_channel_multiplier = kernel_shape
        if "depth_multiplier" in node.attr:
            depth_multiplier = node.get_attr_int("depth_multiplier")
            k_input_channels //= depth_multiplier
            k_channel_multiplier *= depth_multiplier
        if k_input_channels < 1:
            raise ValueError("input channel must be positive")
        k_output_channels = k_input_channels * k_channel_multiplier

        node.set_attr("kernel_shape", [k_h, k_w])
        strides = conv_dims_attr(node, "strides")
        dilations = conv_dims_attr(node, "dilations")
        # group == C_in makes the Conv depthwise (see pytorch hint above).
        node.set_attr("group", k_input_channels)
        add_padding(ctx, node, kernel_shape, strides, dilations)

        new_kernel_shape = [k_h, k_w, 1, k_output_channels]
        conv_convert_inputs(ctx, node, with_kernel=True, new_kernel_shape=new_kernel_shape)


@tf_op(["AvgPool", "AvgPool3D"], onnx_op="AveragePool")
@tf_op(["MaxPool", "MaxPoolV2", "MaxPool3D"], onnx_op="MaxPool")
class PoolOp:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        cls._convert(ctx, node, **kwargs)

    @classmethod
    def version_10(cls, ctx, node, **kwargs):
        cls._convert(ctx, node, **kwargs)

    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # no change
        cls._convert(ctx, node, **kwargs)

    @classmethod
    def _convert(cls, ctx, node, **kwargs):
        # T output = MaxPool(T input, @list(int) ksize, @list(int) strides, @string padding, @string data_format)
        # T Y = MaxPool(T X, @AttrType.STRING auto_pad, @AttrType.INTS kernel_shape, @AttrType.INTS pads,
        # @AttrType.INTS strides)
        # above seems wrong - input[1] is ksize, input[2] is strides
        # stride and ksize in tf is not always NHWC, so watch out when converting into onnx's NCHW
        if kwargs["tf_op"] in ["AvgPool3D", "MaxPool3D"]:
            spatial = 3
        else:
            spatial = 2

        origin_dtype = ctx.get_dtype(node.output[0])
        if origin_dtype not in [onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.DOUBLE]:
            # the onnx spec doesn't allow int types for pool ops
            input_shapes = [ctx.get_shape(node.input[0])]
            output_shapes = [ctx.get_shape(node.output[0])]
            cast_node = ctx.make_node("Cast", [node.input[0]], dtypes=[onnx_pb.TensorProto.FLOAT], shapes=input_shapes,
                                      name=node.name + "_cast", attr={"to": onnx_pb.TensorProto.FLOAT})
            _ = ctx.insert_node_on_output(cast_node, node.inputs[0].output[0])
            cast_back_node = ctx.make_node("Cast", [node.output[0]], dtypes=[origin_dtype], shapes=output_shapes,
                                           name=node.name + "_castback", attr={"to": origin_dtype})
            _ = ctx.insert_node_on_output(cast_back_node, node.output[0])

        # MaxPoolV2 carries ksize/strides as inputs instead of attributes.
        if len(node.input) < 3:
            kernel_shape_tf = node.get_attr("ksize").ints
            strides_tf = node.get_attr("strides").ints
        else:
            kernel_shape_tf = node.inputs[1].get_tensor_value()
            strides_tf = node.inputs[2].get_tensor_value()
            ctx.remove_input(node, node.input[2], 2)
            ctx.remove_input(node, node.input[1], 1)

        kernel_shape_hw = parse_dims_attr(node, kernel_shape_tf, spatial)
        strides_hw = parse_dims_attr(node, strides_tf, spatial)

        node.set_attr("kernel_shape", kernel_shape_hw)
        node.set_attr("strides", strides_hw)
        dilations = conv_dims_attr(node, "dilations", spatial=spatial)
        add_padding(ctx, node, kernel_shape_hw, strides_hw, dilations=dilations, spatial=spatial)
        conv_convert_inputs(ctx, node, with_kernel=False, spatial=spatial)


@tf_op(["MaxPoolWithArgmax"], onnx_op="MaxPool")
class MaxPoolWithArgmaxOp:
    @classmethod
    def version_8(cls, ctx, node, **kwargs):
        # T output = MaxPool(T input, @list(int) ksize,
+ if not node.is_nhwc(): + # however, in NCHW, bias should be at 2nd dim, which by default onnx Add op has no way to know, + # so it needs being reshaped into 3-dim tensor before add + shape0 = ctx.get_shape(node.input[0]) + shape1 = ctx.get_shape(node.input[1]) + if node.inputs[1].type == 'Const' and len(shape1) == 1: + new_broadcast_shape = [shape1[0]] + [1] * (len(shape0) - 2) + shape_name = utils.make_name(node.name) + ctx.make_const(shape_name, np.array(new_broadcast_shape, dtype=np.int64)) + op_name = node.input[1] + reshape_node = ctx.make_node("Reshape", [op_name, shape_name]) + ctx.replace_input(node, op_name, reshape_node.output[0], 1) + ctx.set_shape(reshape_node.output[0], new_broadcast_shape) + + +@tf_op(["Pad", "PadV2", "MirrorPad"], onnx_op="Pad") +class Pad: + @classmethod + def version_1(cls, ctx, node, **kwargs): + node.type = "Pad" + # T output = Pad(T input, int32 paddings, @type Tpaddings), CONST model using default value + # or PadV2(T input, int32 paddings, T constant_value, @type Tpaddings), CONST mode - default value specified + # or MirrorPad(T input, int32 paddings, @type Tpaddings, @STRING mode), other mode. 
+ # T output = Pad(T data, @STRING mode, @INTS pads, @FLOAT value) + paddings = np.array(node.inputs[1].get_tensor_value()).transpose().flatten() + mode = node.get_attr("mode") + if mode: + mode = mode.s.decode("utf-8").lower() + node.set_attr("mode", mode) + if mode not in [None, "constant", "reflect"]: + raise ValueError(mode + " pad mode is not supported") + + if mode in [None, "constant"] and len(node.input) == 3: + const_val = node.inputs[2].get_tensor_value() + node.set_attr("value", const_val) + ctx.remove_input(node, node.input[2], 2) + + ctx.remove_input(node, node.input[1], 1) + node.set_attr("pads", paddings) + + origin_dtype = ctx.get_dtype(node.output[0]) + if origin_dtype not in [onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.FLOAT, + onnx_pb.TensorProto.DOUBLE]: + cast_node = ctx.insert_new_node_on_input(node, "Cast", node.input[0], to=onnx_pb.TensorProto.FLOAT) + ctx.set_dtype(cast_node.output[0], onnx_pb.TensorProto.FLOAT) + ctx.copy_shape(node.name, cast_node.output[0]) + + cast_back_node = ctx.insert_new_node_on_output("Cast", node.output[0], + name=utils.make_name(node.name) + "_castback", + to=origin_dtype) + ctx.set_dtype(cast_back_node.output[0], origin_dtype) + ctx.copy_shape(node.name, cast_back_node.output[0]) + + @classmethod + def version_11(cls, ctx, node, **kwargs): + mode = node.get_attr("mode") + if mode: + mode = mode.s.decode("utf-8").lower() + node.set_attr("mode", mode) + if mode not in [None, "constant", "reflect"]: + raise ValueError(mode + " pad mode is not supported") + + # pads must be int64. 
+ if ctx.get_dtype(node.input[1]) != onnx_pb.TensorProto.INT64: + ctx.insert_new_node_on_input(node, "Cast", node.input[1], to=onnx_pb.TensorProto.INT64) + ctx.insert_new_node_on_input(node, "Transpose", node.input[1]) + shape_const = ctx.make_const(utils.make_name(node.name), np.array([-1]).astype(np.int64)) + ctx.insert_new_node_on_input(node, "Reshape", [node.input[1], shape_const.name]) + + origin_dtype = ctx.get_dtype(node.output[0]) + if origin_dtype not in [TensorProto.FLOAT, TensorProto.DOUBLE, + TensorProto.INT32, TensorProto.INT64]: + cast_node = ctx.insert_new_node_on_input(node, "Cast", node.input[0], to=TensorProto.FLOAT) + ctx.set_dtype(cast_node.output[0], TensorProto.FLOAT) + ctx.copy_shape(node.name, cast_node.output[0]) + + cast_back_node = ctx.insert_new_node_on_output("Cast", node.output[0], + name=utils.make_name(node.name) + "_castback", + to=origin_dtype) + ctx.set_dtype(cast_back_node.output[0], origin_dtype) + ctx.copy_shape(node.name, cast_back_node.output[0]) + + +@tf_op(["FusedBatchNorm", "FusedBatchNormV2", "FusedBatchNormV3"]) +class BatchNorm: + @classmethod + def version_6(cls, ctx, node, **kwargs): + tf_type = node.type + node.type = "BatchNormalization" + # tf inputs: x, scale, bias, mean, variance + # tf outputs: y, batch_mean, batch_var + # a: data_format, epsilon, is_training + # onnx inputs: X, scale, B, mean, variance, attributes: epsilon, momentum=0.9, spatial : 1 + # output: y, mean, var, savedmean, savedvar, + # detach unused outputs. While we could let the unused outputs dangle, + # some runtimes like pytorch/caffe2 do complain about it. + + # onnx batchnorm requires same T for all inputs + mean_type = ctx.get_dtype(node.input[3]) + x_dtype = ctx.get_dtype(node.input[0]) + if x_dtype != mean_type: + # TODO: this works but more efficient would be to flip the other inputs. 
            # We'd need to check
            # TODO: first if this works with the onnx implementation so its a later for now
            ctx.insert_new_node_on_input(node, "Cast", node.input[0], to=mean_type)
            # casting the input[0] will change the output dtype of bn so we need to cast back
            cast_back_node = ctx.insert_new_node_on_output("Cast", node.output[0],
                                                           name=utils.make_name(node.name) + "_castback",
                                                           to=x_dtype)
            ctx.set_dtype(cast_back_node.output[0], x_dtype)
            ctx.copy_shape(node.name, cast_back_node.output[0])

        # Drop the mean/var outputs when nothing consumes them (see note above).
        consumers = [ctx.find_output_consumers(output_name) for output_name in node.output[1:]]
        if not any(consumers):
            new_output = [node.output[0]]
            # the setter makes a copy of new_output
            node.output = new_output

        conv_convert_inputs(ctx, node, with_kernel=False)

        inp_shape = ctx.get_shape(node.input[0])
        inp_rank = len(inp_shape) if inp_shape is not None else None
        scale_shape = ctx.get_shape(node.input[1])
        mean_shape = ctx.get_shape(node.input[3])
        var_shape = ctx.get_shape(node.input[4])
        val_type = utils.map_onnx_to_numpy_type(ctx.get_dtype(node.input[1]))
        is_training = node.get_attr_value('is_training', True)

        if is_training and node.get_attr_value('exponential_avg_factor', 1.0) == 1.0:
            # Sometimes TF uses a BatchNorm op with training = True and exponential_avg_factor = 1.0
            # to perform layer mean/variance normalization. In such cases, the mean/var are computed from the input.
            # TF allows mean/variance to be excluded only if is_training and exponential_avg_factor == 1.0
            utils.make_sure(inp_rank is not None, "Cannot convert node %s of type %s with input of unknown rank.",
                            node.name, tf_type)
            # Reduce over batch + spatial dims (data is NCHW here), keep channels.
            dims = [0] + list(range(2, inp_rank))
            avg = ctx.make_node("ReduceMean", [node.input[0]], attr={'axes': dims, 'keepdims': True}).output[0]
            avg_squeezed = GraphBuilder(ctx).make_squeeze({"data": avg, "axes": dims})
            sub = ctx.make_node("Sub", [node.input[0], avg]).output[0]
            var_squeezed = ctx.make_node("ReduceSumSquare", [sub], attr={'axes': dims, 'keepdims': False}).output[0]

            # Element count of the reduced dims, to turn the sum of squares
            # into a (population) variance.
            inp_shape = ctx.make_node("Shape", [node.input[0]]).output[0]
            dims_const = ctx.make_const(utils.make_name("axes_const"), np.array(dims, dtype=np.int64)).output[0]
            reduce_dims = ctx.make_node("Gather", [inp_shape, dims_const]).output[0]
            dims_product = ctx.make_node("ReduceProd", [reduce_dims], attr={'axes': [0], 'keepdims': False})
            cnt_float = ctx.make_node("Cast", [dims_product.output[0]], attr={'to': ctx.get_dtype(node.input[0])})

            pop_var_squeezed = ctx.make_node("Div", [var_squeezed, cnt_float.output[0]]).output[0]
            ctx.replace_inputs(node, node.input[:3] + [avg_squeezed, pop_var_squeezed])
        elif is_training:
            logger.warning("Node %s of type %s has is_training set to true, which is not supperted. "
                           "Please re-save the model with training set to false.",
                           node.name, tf_type)
            # As long as the mean/variance estimates are provided, we should be OK
            is_training = False

        # Broadcast constant mean/variance to the scale shape if they disagree
        # (only safe with fully known, non-negative scale dims).
        if not is_training and mean_shape != scale_shape and all(d >= 0 for d in scale_shape):
            new_mean_value = np.array(np.resize(node.inputs[3].get_tensor_value(as_list=False), scale_shape),
                                      dtype=val_type)
            new_mean_node_name = utils.make_name(node.name)
            ctx.make_const(new_mean_node_name, new_mean_value)
            ctx.replace_input(node, node.input[3], new_mean_node_name, 3)

        if not is_training and var_shape != scale_shape and all(d >= 0 for d in scale_shape):
            new_var_value = np.array(np.resize(node.inputs[4].get_tensor_value(as_list=False), scale_shape),
                                     dtype=val_type)
            new_val_node_name = utils.make_name(node.name)
            ctx.make_const(new_val_node_name, new_var_value)
            ctx.replace_input(node, node.input[4], new_val_node_name, 4)

    @classmethod
    def version_9(cls, ctx, node, **kwargs):
        # is_test was removed - no change for us
        cls.version_6(ctx, node, **kwargs)


@tf_op(["SpaceToDepth"])
class SpaceToDepth:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # TF attr is 'block_size'; ONNX wants 'blocksize'.
        block_size = node.get_attr("block_size")
        node.set_attr("blocksize", block_size.i)
        conv_convert_inputs(ctx, node, with_kernel=False)


@tf_op(["DepthToSpace"])
class DepthToSpace:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # TF attr is 'block_size'; ONNX wants 'blocksize'.
        block_size = node.get_attr("block_size")
        node.set_attr("blocksize", block_size.i)
        conv_convert_inputs(ctx, node, with_kernel=False)

    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # Onnx-11 CRD mode added.
        # No change for tf2onnx
        cls.version_1(ctx, node, **kwargs)


@tf_op(["CropAndResize"])
class CropAndResize:
    @classmethod
    def version_10(cls, ctx, node, **kwargs):
        utils.make_sure(node.inputs[1].type == "Const", "boxes input must be a Const")
        # NOTE(review): this second check guards the crop_size input (inputs[3])
        # but reuses the "boxes" message text — looks copy/pasted.
        utils.make_sure(node.inputs[3].type == "Const", "boxes input must be a Const")
        name = node.name
        output_height = node.inputs[3].get_tensor_value()[0]
        output_width = node.inputs[3].get_tensor_value()[1]
        rois = node.inputs[1].get_tensor_value()
        rois_shape = ctx.get_shape(node.input[1])
        img_shape = ctx.get_shape(node.input[0])
        # Convert normalized TF boxes [y1, x1, y2, x2] into absolute RoiAlign
        # rois [x1, y1, x2, y2], expanded by half a sample spacing on each side
        # to match CropAndResize's corner-aligned sampling.
        transform_rois = np.zeros(list(rois_shape), dtype=np.float32)
        for i in range(rois_shape[0]):
            y1, x1, y2, x2 = rois[i]
            y1 = y1 * (img_shape[1] - 1)
            y2 = y2 * (img_shape[1] - 1)
            x1 = x1 * (img_shape[2] - 1)
            x2 = x2 * (img_shape[2] - 1)
            spacing_h = (y2 - y1)
            spacing_w = (x2 - x1)
            b1 = y1 - 0.5 * spacing_h / (output_height - 1)
            a1 = x1 - 0.5 * spacing_w / (output_width - 1)
            b2 = y2 + 0.5 * spacing_h / (output_height - 1)
            a2 = x2 + 0.5 * spacing_w / (output_width - 1)
            transform_rois[i][0] = a1
            transform_rois[i][1] = b1
            transform_rois[i][2] = a2
            transform_rois[i][3] = b2
        cast_node = ctx.make_node("Cast", [node.input[2]], attr={"to": onnx_pb.TensorProto.INT64})
        bbox_node = ctx.make_const(utils.make_name("bbox"), transform_rois)
        dtypes = [ctx.get_dtype(node.output[0])]
        shapes = [ctx.get_shape(node.output[0])]
        # RoiAlign is NCHW-only; wrap it in NHWC<->NCHW transposes.
        input_nchw = ctx.make_node("Transpose", [node.input[0]], {"perm": [0, 3, 1, 2]},
                                   name=utils.make_name(node.name))
        crop_and_resize = ctx.make_node("RoiAlign", inputs=[input_nchw.output[0], bbox_node.output[0],
                                                            cast_node.output[0]],
                                        attr={"output_height": output_height, "output_width": output_width,
                                              "spatial_scale": 1.0, "sampling_ratio": 1},
                                        name=utils.make_name(node.name), dtypes=dtypes, shapes=shapes)
        ctx.remove_node(name)
        ctx.make_node("Transpose", crop_and_resize.output, {"perm": [0, 2, 3, 1]},
                      name=name,
outputs=node.output, shapes=shapes, dtypes=dtypes) + + @classmethod + def any_version_after11(cls, opset, ctx, node, **kwargs): + # create loop of resize to cater to tensorflow CropAndResize, one box one iteration + mode = "nearest" if node.get_attr("method") is not None and node.get_attr( + "method").s == b"nearest" else "linear" + extrapolation_value = float(node.get_attr("extrapolation_value", "0").f) + input_x = node.input[0] + boxes = node.input[1] + box_ind = node.input[2] + crop_size = node.input[3] + trip_name = utils.make_name(node.name + "_i") + cond_name = utils.make_name(node.name + "_cond") + cond_out_name = utils.make_name(node.name + "cond_out") + g = ctx.create_new_graph_with_same_config() + g.add_graph_input(trip_name, TensorProto.INT64, [1]) + g.add_graph_input(cond_name, TensorProto.BOOL, []) + g.parent_graph = ctx + const_zero = g.make_const(utils.make_name(node.name + "_const_zero"), np.array([0], dtype=np.int32)) + const_zero_long = g.make_const(utils.make_name(node.name + "_const_zero_long"), np.array([0], dtype=np.int64)) + const_one = g.make_const(utils.make_name(node.name + "_const_one"), np.array([1], dtype=np.int32)) + const_one_long = g.make_const(utils.make_name(node.name + "_const_one_long"), np.array([1], dtype=np.int64)) + index_end = g.make_node("Add", [trip_name, const_one_long.output[0]]) + box_index_from = g.make_node("Slice", [box_ind, trip_name, index_end.output[0]], name="Slice_a") + box_index_to = g.make_node("Add", [box_index_from.output[0], const_one.output[0]]) + target_x = g.make_node("Slice", [input_x, box_index_from.output[0], box_index_to.output[0], + const_zero.output[0]], name="Slice_b") + transposed_x = g.make_node("Transpose", [target_x.output[0]], attr={'perm': constants.NHWC_TO_NCHW}) + const_zero_zero = g.make_const(utils.make_name(node.name + "_const_zero_zero"), + np.array([0, 0], dtype=np.float32)) + const_one_one = g.make_const(utils.make_name(node.name + "_const_one_one"), + np.array([1, 1], 
dtype=np.float32)) + const_four = g.make_const(utils.make_name(node.name + "_const_four"), np.array([4], dtype=np.int64)) + const_empty_float = g.make_const(utils.make_name("const_empty_float"), np.array([], dtype=np.float32)) + box = g.make_node("Slice", [boxes, trip_name, index_end.output[0], const_zero_long.output[0]], + name="Slice_c") + roi_raw = g.make_node("Reshape", [box.output[0], const_four.output[0]]) + roi_raw_first_half = GraphBuilder(g).make_slice({"data": roi_raw.output[0], "ends": [2], "starts": [0]}) + roi_raw_second_half = GraphBuilder(g).make_slice({"data": roi_raw.output[0], "ends": [4], "starts": [2]}) + roi_concat_1 = g.make_node("Concat", [const_zero_zero.output[0], roi_raw_first_half], attr={'axis': 0}) + roi_concat_2 = g.make_node("Concat", [const_one_one.output[0], roi_raw_second_half], attr={'axis': 0}) + final_roi = g.make_node("Concat", [roi_concat_1.output[0], roi_concat_2.output[0]], attr={'axis': 0}) + final_crop_size = build_dynamic_target_size(g, transposed_x.output[0], crop_size) + resized_x = g.make_node("Resize", [transposed_x.output[0], final_roi.output[0], const_empty_float.output[0], + final_crop_size.output[0]], + attr={"mode": mode, "extrapolation_value": extrapolation_value, + "coordinate_transformation_mode": "tf_crop_and_resize"}) + recovered_x = g.make_node("Transpose", [resized_x.output[0]], attr={'perm': constants.NCHW_TO_NHWC}) + squeeze_x = GraphBuilder(g).make_squeeze({'data': recovered_x.output[0], 'axes': [0]}, return_node=True) + g.make_node("Identity", [cond_name], outputs=[cond_out_name]) + g.add_graph_output(cond_out_name, TensorProto.BOOL, []) + g.add_graph_output(squeeze_x.output[0], ctx.get_dtype(node.input[0]), [-1, -1, -1]) + trip_node = ctx.make_node("Size", [box_ind]) + cond_const = ctx.make_const(utils.make_name("cond"), np.ones((), dtype=np.bool)) + ctx.remove_node(node.name) + branches = {"body": g} + inner_loop = ctx.make_node("Loop", [trip_node.output[0], cond_const.output[0]], name=node.name, + 
outputs=node.output, branches=branches) + + @classmethod + def version_11(cls, ctx, node, **kwargs): + cls.any_version_after11(11, ctx, node, **kwargs) + + @classmethod + def version_13(cls, ctx, node, **kwargs): + # Signature of operator Squeeze changed. + cls.any_version_after11(13, ctx, node, **kwargs) + + +@tf_op(["ResizeBilinear", "ResizeNearestNeighbor", "ResizeBicubic"]) +class Resize: + @classmethod + def version_7(cls, ctx, node, **kwargs): + utils.make_sure(node.type != "ResizeBicubic", "Opset 11 is required for bicubic interpolation for node %s", + node.name) + mode = "linear" if node.type == "ResizeBilinear" else "nearest" + node.type = "Upsample" + shape = ctx.get_shape(node.input[0]) + target_shape = node.inputs[1].get_tensor_value() + # https://www.tensorflow.org/api_docs/python/tf/image/resize_nearest_neighbor + # wants the input to be NHWC - adjust target_shape to this. + n, h, w, c = shape + nh, nw = target_shape + utils.make_sure(all(i != -1 for i in [nh, nw]), "h and w need to be known") + # scaler is nchw + scaler = [1., 1., float(nh) / h, float(nw) / w] + node.set_attr("scales", scaler) + node.set_attr("mode", mode) + ctx.remove_input(node, node.input[1], 1) + node.data_format = "NHWC" + conv_convert_inputs(ctx, node, with_kernel=False) + + @classmethod + def version_9(cls, ctx, node, **kwargs): + cls._convert_since_9(ctx, node, op_type="Upsample") + + @classmethod + def version_10(cls, ctx, node, **kwargs): + cls._convert_since_9(ctx, node, op_type="Resize") + + @classmethod + def version_11(cls, ctx, node, **kwargs): + cubic_coeff_a = None + exclude_outside = False + if node.type == "ResizeBilinear": + mode = "linear" + elif node.type == "ResizeBicubic": + mode = "cubic" + cubic_coeff_a = -0.5 + exclude_outside = True + else: + mode = "nearest" + roi = ctx.make_const(utils.make_name("roi"), np.array([]).astype(np.float32)) + const_zero = ctx.make_const(utils.make_name("const_zero"), np.array([0]).astype(np.int64)) + const_two = 
ctx.make_const(utils.make_name("const_two"), np.array([2]).astype(np.int64)) + const_empty_float = ctx.make_const(utils.make_name("const_empty_float"), np.array([]).astype(np.float32)) + input_nchw = ctx.make_node("Transpose", [node.input[0]], {"perm": constants.NHWC_TO_NCHW}) + shape_input = ctx.make_node("Shape", [input_nchw.output[0]]) + sliced_shape = ctx.make_node("Slice", [shape_input.output[0], const_zero.output[0], const_two.output[0]]) + size_int64 = ctx.make_node("Cast", [node.input[1]], attr={"to": onnx_pb.TensorProto.INT64}) + concat_shape = ctx.make_node("Concat", [sliced_shape.output[0], size_int64.output[0]], {'axis': 0}) + resize_inputs = [ + input_nchw.output[0], + roi.output[0], + const_empty_float.output[0], + concat_shape.output[0] + ] + transformation_mode = "asymmetric" + nearest_mode = "floor" + if "align_corners" in node.attr and node.attr["align_corners"].i: + transformation_mode = "align_corners" + if "half_pixel_centers" in node.attr and node.attr["half_pixel_centers"].i: + if node.type == "ResizeNearestNeighbor" and not ctx.is_target(constants.TARGET_TENSORRT): + # TensorRT only supports nearest_mode = "floor" for mode = "nearest" + transformation_mode = "half_pixel" + nearest_mode = "round_prefer_ceil" + else: + transformation_mode = "half_pixel" + attr = {"mode": mode, "nearest_mode": nearest_mode, "coordinate_transformation_mode": transformation_mode, + "exclude_outside": exclude_outside} + if cubic_coeff_a is not None: + attr["cubic_coeff_a"] = cubic_coeff_a + resize = ctx.make_node("Resize", resize_inputs, attr=attr) + shapes = node.output_shapes + dtypes = node.output_dtypes + ctx.remove_node(node.name) + ctx.make_node("Transpose", resize.output, {"perm": constants.NCHW_TO_NHWC}, + name=node.name, outputs=node.output, shapes=shapes, dtypes=dtypes) + + @classmethod + def _convert_since_9(cls, ctx, node, op_type, use_target_size=False): + + # float32 out = ResizeBilinear/ResizeNearestNeighbor(T images, int size) + # 
https://www.tensorflow.org/api_docs/python/tf/image/resize_nearest_neighbor + # wants the input to be NHWC - adjust target_shape to this. + utils.make_sure(node.type != "ResizeBicubic", "Opset 11 is required for bicubic interpolation for node %s", + node.name) + mode = "linear" if node.type == "ResizeBilinear" else "nearest" + + # because onnxruntime only supports to scale the last two dims so transpose is inserted + input_nchw = ctx.make_node("Transpose", [node.input[0]], {"perm": constants.NHWC_TO_NCHW}) + if use_target_size: + final_target_size = build_dynamic_target_size(ctx, input_nchw.output[0], node.input[1]) + roi = ctx.make_const(utils.make_name("roi"), np.array([]).astype(np.float32)) + const_empty_float = ctx.make_const(utils.make_name("const_empty_float"), np.array([], dtype=np.float32)) + resize_inputs = [ + input_nchw.output[0], + roi.output[0], + const_empty_float.output[0], + final_target_size.output[0] + ] + upsample = ctx.make_node("Resize", resize_inputs, + attr={"mode": mode, "nearest_mode": "floor", + "coordinate_transformation_mode": "asymmetric"}) + else: + # first create "scales" info for onnx upsample + # if shape of input and output known then "scale" is calculated statically and set as a const node + shape = ctx.get_shape(node.input[0]) + if shape and shape[2] != -1 and shape[1] != -1 and node.inputs[1].is_const(): + target_shape = node.inputs[1].get_tensor_value() + n, h, w, c = shape + nh, nw = target_shape + # scales is nchw + # the reason not storing data at raw field is because of the bug: + # https://github.com/onnx/onnx/issues/1852 + scale_val = np.array([1.0, 1.0, float(nh) / h, float(nw) / w]).astype(np.float32) + scales = ctx.make_const(utils.make_name("scales"), scale_val, raw=False) + else: + ori_shape = ctx.make_node("Shape", [node.input[0]]) + attr = {"axes": [0], "starts": [1], "ends": [3]} + inputs_map = {"data": ori_shape.output[0], **attr} + ori_shape_hw = GraphBuilder(ctx).make_slice(inputs_map) + ori_shape_hw_float = 
ctx.make_node("Cast", [ori_shape_hw], attr={"to": onnx_pb.TensorProto.FLOAT}) + + target_hw = node.inputs[1] + target_hw_float = ctx.make_node("Cast", target_hw.output, attr={"to": onnx_pb.TensorProto.FLOAT}) + + scales_hw = ctx.make_node("Div", [target_hw_float.output[0], ori_shape_hw_float.output[0]]) + + const_one_array = ctx.make_const(utils.make_name("one"), np.array([1.0, 1.0]).astype(np.float32)) + # scales is nchw + scales = ctx.make_node("Concat", [const_one_array.output[0], scales_hw.output[0]], {"axis": 0}) + upsample = ctx.make_node(op_type, [input_nchw.output[0], scales.output[0]], attr={"mode": mode}) + + shapes = node.output_shapes + dtypes = node.output_dtypes + ctx.remove_node(node.name) + ctx.make_node("Transpose", upsample.output, {"perm": constants.NCHW_TO_NHWC}, + name=node.name, outputs=node.output, shapes=shapes, dtypes=dtypes) + + +@tf_op("AdjustContrastv2") +class AdjustContrastv2: + @classmethod + def version_1(cls, ctx, node, **kwargs): + images, contrast_factor = node.input + dtype = ctx.get_dtype(images) + if ctx.get_dtype(contrast_factor) != dtype: + contrast_factor = ctx.make_node("Cast", [dtype], attr={'to': dtype}).output[0] + rank = ctx.get_rank(images) + utils.make_sure(rank is not None, "AdjustContrastv2 requires input of known rank") + # Reduce everything except channels + axes_to_reduce = list(range(rank))[:-1] + mean = ctx.make_node("ReduceMean", [images], attr={'axes': axes_to_reduce, 'keepdims': True}, + op_name_scope=node.name).output[0] + diff = ctx.make_node("Sub", [images, mean], op_name_scope=node.name).output[0] + scaled = ctx.make_node("Mul", [diff, contrast_factor], op_name_scope=node.name).output[0] + result = ctx.make_node("Add", [scaled, mean], op_name_scope=node.name).output[0] + ctx.replace_all_inputs(node.output[0], result) + ctx.remove_node(node.name) + + +@tf_op("AdjustSaturation") +class AdjustSaturation: + @classmethod + def version_11(cls, ctx, node, **kwargs): + images, factor = node.input + dtype = 
ctx.get_dtype(images) + np_dtype = utils.map_onnx_to_numpy_type(dtype) + k = ctx.make_const(utils.make_name("three"), np.array([3], np.int64)).output[0] + ordered, indices = ctx.make_node("TopK", [images, k], attr={'axis': -1}, output_count=2).output + # Sorted and separated into channels + max_c, mid_c, min_c = ctx.make_node("Split", [ordered], attr={'axis': -1}, output_count=3).output + delta = ctx.make_node("Sub", [max_c, min_c]).output[0] + scaled_delta = ctx.make_node("Mul", [delta, factor], op_name_scope=node.name).output[0] + new_delta = ctx.make_node("Min", [scaled_delta, max_c]).output[0] + new_min = ctx.make_node("Sub", [max_c, new_delta]).output[0] + delta2 = ctx.make_node("Sub", [mid_c, min_c]).output[0] + const_zero = ctx.make_const(utils.make_name("zero"), np.array(0, np_dtype)).output[0] + delta_z = ctx.make_node("Equal", [delta, const_zero]).output[0] + delta_z_cast = ctx.make_node("Cast", [delta_z], attr={'to': dtype}).output[0] + delta_nz = ctx.make_node("Add", [delta, delta_z_cast]).output[0] + delta2_scale = ctx.make_node("Div", [new_delta, delta_nz]).output[0] + new_delta2 = ctx.make_node("Mul", [delta2, delta2_scale], op_name_scope=node.name).output[0] + new_mid = ctx.make_node("Add", [new_min, new_delta2]).output[0] + new_ordered = ctx.make_node("Concat", [max_c, new_mid, new_min], attr={'axis': -1}).output[0] + # Now put it back in order + result = ctx.make_node("GatherElements", [new_ordered, indices], attr={'axis': -1}).output[0] + ctx.replace_all_inputs(node.output[0], result) + ctx.remove_node(node.name) + + +@tf_op("MatrixBandPart") +class MatrixBandPart: + @classmethod + def version_7(cls, ctx, node, **kwargs): + # T output = MatrixBandPart(T input, int num_lower, int num_upper) + # data-flow: first generate mask matrix and then use element-wise mul op + input_rank = len(ctx.get_shape(node.input[0])) + utils.make_sure(input_rank == 2, error_msg="MatrixBandPart op: only rank 2 is supported") + bandpart = 
[node.inputs[ind].get_tensor_value() for ind in [1, 2]] + utils.make_sure(bandpart in [[-1, 0], [0, -1]], "only support Lower/Upper triangular for opset < 11") + # methods to generate mask matrix: if lower triangular is needed, then generate column one by one + # otherwise row is generated one by one. + axis, counter_axis, squeeze_axis = (1, 0, 2) if bandpart == [-1, 0] else (0, 1, 1) + # 1: subgraph to implement tf.onelike(input[:, 0]), + # no need to worry about the dtype, because bool type is needed as Xor only support bool + node_name = utils.make_name("const_zero") + const_zero = ctx.make_const(name=node_name, np_val=np.array([0]).astype(np.int32)) + first_col_or_row = ctx.make_node(op_type="Gather", inputs=[node.input[0], const_zero.output[0]], + attr={"axis": axis}) + first_col_or_row_casted = ctx.make_node(op_type="Cast", inputs=first_col_or_row.output, + attr={"to": onnx_pb.TensorProto.BOOL}) + # line means one col or one row + zero_line = ctx.make_node(op_type="Xor", inputs=first_col_or_row_casted.output * 2) + one_line = ctx.make_node(op_type="Not", inputs=zero_line.output) + + # 2: "loop" to generate mask matrix: generate col or row of matrix one by one + g = ctx.create_new_graph_with_same_config() + node_name = utils.make_name("const_zero_bool") + const_zero_bool = g.make_const(name=node_name, np_val=np.array([[0]]).astype(np.bool)) + g.set_dtype(const_zero_bool.output[0], onnx_pb.TensorProto.BOOL) + + g.add_graph_input("trip", onnx_pb.TensorProto.INT64, []) + g.add_graph_input("cond", onnx_pb.TensorProto.BOOL, []) + g.add_graph_input("line", onnx_pb.TensorProto.BOOL, [-1, -1]) + + # shift right the line and add zero at the left. 
+ new_line = g.make_node(op_type="Concat", inputs=[const_zero_bool.output[0], "line"], + attr={"axis": counter_axis}, + dtypes=[onnx_pb.TensorProto.BOOL]) + attr = {"axes": [counter_axis], "starts": [0], "ends": [-1]} + inputs_map = {"data": new_line.output[0], **attr} + slice_node = GraphBuilder(g).make_slice(inputs_map) + + g.make_node("Identity", ["cond"], outputs=["cond_out"]) + g.make_node("Identity", ["line"], outputs=["res"]) + g.make_node("Identity", [slice_node], outputs=["line_out"]) + + g.add_graph_output("cond_out", onnx_pb.TensorProto.BOOL, []) + g.add_graph_output("line_out", onnx_pb.TensorProto.BOOL, [-1, -1]) + g.add_graph_output("res", onnx_pb.TensorProto.BOOL, [-1, -1]) + + # initial value of body vars + shape = ctx.make_node(op_type="Shape", inputs=[node.input[0]]) # dtype of result is int64 + node_name = utils.make_name("line_num_index") + col_or_row_num_index = ctx.make_const(name=node_name, np_val=np.array(axis).astype(np.int32)) + line_num = ctx.make_node(op_type="Gather", inputs=[shape.output[0], col_or_row_num_index.output[0]]) + trip_cnt = line_num.output[0] + node_name = utils.make_name("true") + cond = ctx.make_const(name=node_name, np_val=np.array(1).astype(np.bool)) + col_init = one_line.output[0] + + branches = {"body": g} + loop_node = ctx.make_node(op_type="Loop", inputs=[trip_cnt, cond.output[0], col_init], + output_count=2, branches=branches) + # convert generated mask matrix from bool to right shape and data type + squeeze = GraphBuilder(ctx).make_squeeze( + {'data': loop_node.output[1], 'axes': [squeeze_axis]}, return_node=True) + cast1 = ctx.make_node(op_type="Cast", inputs=squeeze.output, attr={"to": onnx_pb.TensorProto.FLOAT}) + if axis == 1: + mask_matrix = ctx.make_node(op_type="Transpose", inputs=cast1.output) + else: + mask_matrix = squeeze + cast2 = ctx.make_node(op_type="Cast", inputs=mask_matrix.output, + attr={"to": ctx.get_dtype(node.input[0])}) + shapes = node.output_shapes + dtypes = node.output_dtypes + 
ctx.remove_node(node.name) + ctx.make_node(op_type="Mul", inputs=[cast2.output[0], node.input[0]], + name=node.name, outputs=node.output, shapes=shapes, + dtypes=dtypes) + + @classmethod + def version_11(cls, ctx, node, **kwargs): + num_lower_const = node.inputs[1].get_tensor_value() if node.inputs[1].is_const() else None + num_upper_const = node.inputs[2].get_tensor_value() if node.inputs[2].is_const() else None + data, num_lower, num_upper = node.input + rank = ctx.get_rank(data) + int_max_val = utils.get_max_value(np.int64) + dtype = ctx.get_dtype(data) + if rank == 2: + shape = ctx.make_node("Shape", [data]).output[0] + else: + whole_shape = ctx.make_node("Shape", [data]).output[0] + shape = GraphBuilder(ctx).make_slice( + {'data': whole_shape, 'starts': [-2], 'ends': [int_max_val], 'axes': [0]}) + if num_lower_const == 0 and num_upper_const == 0: + if rank == 2: + identity_node = ctx.make_node("EyeLike", [data]).output[0] + else: + zero_tensor = helper.make_tensor("value", dtype, dims=[1], vals=[0]) + const_of_shape = ctx.make_node("ConstantOfShape", [shape], attr={'value': zero_tensor}).output[0] + identity_node = ctx.make_node("EyeLike", [const_of_shape]).output[0] + shapes = node.output_shapes + dtypes = node.output_dtypes + ctx.remove_node(node.name) + ctx.make_node(op_type="Mul", inputs=[identity_node, data], + name=node.name, outputs=node.output, shapes=shapes, + dtypes=dtypes) + return + zero_const = ctx.make_const(utils.make_name("zero"), np.array(0, np.int64)).output[0] + one_const = ctx.make_const(utils.make_name("one"), np.array(1, np.int64)).output[0] + conditions = [] + row_cnt = GraphBuilder(ctx).make_slice({'data': shape, 'axes': [0], 'starts': [0], 'ends': [1]}) + col_cnt = GraphBuilder(ctx).make_slice({'data': shape, 'axes': [0], 'starts': [1], 'ends': [2]}) + limit = ctx.make_node("Mul", [row_cnt, col_cnt]).output[0] + # idx_cnt = ctx.make_node("Range", [zero_const, limit, one_const]).output[0] + + ones_of_shape = ctx.make_node("Expand", 
[one_const, limit]).output[0] + idx_cnt = ctx.make_node("CumSum", [ones_of_shape, zero_const], attr={'exclusive': True}).output[0] + + idx_reshape = ctx.make_node("Reshape", [idx_cnt, shape]).output[0] + row_idx = ctx.make_node("Div", [idx_reshape, col_cnt]).output[0] + col_idx = ctx.make_node("Mod", [idx_reshape, col_cnt]).output[0] + idx_diff = ctx.make_node("Sub", [col_idx, row_idx]).output[0] + + if num_upper_const is None or num_upper_const >= 0: + if ctx.get_dtype(num_upper) != TensorProto.INT64: + num_upper = ctx.make_node("Cast", [num_upper], attr={'to': TensorProto.INT64}).output[0] + greater = ctx.make_node("Greater", [idx_diff, num_upper]).output[0] + less_or_equal = ctx.make_node("Not", [greater]).output[0] + conditions.append(less_or_equal) + if num_lower_const is None or num_lower_const >= 0: + if ctx.get_dtype(num_lower) != TensorProto.INT64: + num_lower = ctx.make_node("Cast", [num_lower], attr={'to': TensorProto.INT64}).output[0] + num_lower_neg = ctx.make_node("Neg", [num_lower]).output[0] + greater = ctx.make_node("Greater", [num_lower_neg, idx_diff]).output[0] + less_or_equal = ctx.make_node("Not", [greater]).output[0] + conditions.append(less_or_equal) + if len(conditions) == 0: + node.type = "Identity" + ctx.replace_inputs(node, [data]) + return + if len(conditions) == 1: + cond = conditions[0] + if len(conditions) == 2: + cond = ctx.make_node("And", conditions).output[0] + mask = ctx.make_node("Cast", [cond], attr={'to': ctx.get_dtype(data)}).output[0] + shapes = node.output_shapes + dtypes = node.output_dtypes + ctx.remove_node(node.name) + ctx.make_node(op_type="Mul", inputs=[mask, data], + name=node.name, outputs=node.output, shapes=shapes, + dtypes=dtypes) + + +def _make_softmax_cross_entropy_with_logits(ctx, label, logit, tf_ori_node): + label_dtype = ctx.get_dtype(label.output[0]) + logit_dtype = ctx.get_dtype(logit.output[0]) + utils.make_sure(label_dtype == logit_dtype, "the following logic only works on same dtype of label and 
logit") + + log_softmax = ctx.make_node(op_type="LogSoftmax", inputs=logit.output) + # implement tf.multiply(-1, tf.reduce_sum(tf.multiply(label, log_softmax), axis=1)) + mul1 = ctx.make_node(op_type="Mul", inputs=[label.output[0], log_softmax.output[0]]) + reduce_sum_output = GraphBuilder(ctx).make_reduce_sum( + {"data": mul1.output[0], "axes": [-1], "keepdims": 1, "noop_with_empty_axes": 1}) + const_negative_one = ctx.make_const(name=utils.make_name("const_negative_one"), + np_val=np.array(-1).astype(utils.ONNX_TO_NUMPY_DTYPE[logit_dtype])) + mul2 = ctx.make_node(op_type="Mul", inputs=[const_negative_one.output[0], reduce_sum_output]) + shapes = tf_ori_node.output_shapes + dtypes = tf_ori_node.output_dtypes + ctx.remove_node(tf_ori_node.name) + GraphBuilder(ctx).make_squeeze({'axes': [1], 'data': mul2.output[0], 'outputs': [tf_ori_node.output[0]]}, + shapes=[shapes[0]], dtypes=[dtypes[0]]) + + +def sparse_softmax_cross_entropy_with_logits_op_by_gathernd(ctx, node, **kwargs): + # make subgraph to implement one_hot, idea comes from onehot_op + indices_name = node.input[1] + indices_shape = ctx.get_shape(indices_name) + if len(indices_shape) != 1: + # TODO: this works for rank=1 but tensorflow supports more than this. + # Same principle should work but we need to implement our own eye. 
+ raise ValueError("onehot op: only rank1 is supported") + logit_name = node.input[0] + logit_dtype = ctx.get_dtype(logit_name) + logit_shape = ctx.get_shape(logit_name) + utils.make_sure(logit_dtype, "Dtype of {} is None".format(logit_name)) + indices_dtype = ctx.get_dtype(indices_name) + if indices_dtype != TensorProto.INT64: + indices_cast = ctx.make_node("Cast", [indices_name], attr={"to": TensorProto.INT64}) + indices_name = indices_cast.output[0] + indices_size = ctx.make_node("Size", [indices_name]) + gb = GraphBuilder(ctx) + indices_unsqueeze = gb.make_unsqueeze({'data': indices_name, "axes": [1]}, return_node=True) + zero_const = ctx.make_const(utils.make_name("zero"), np.array(0, dtype=np.int64)) + one_const = ctx.make_const(utils.make_name("one"), np.array(1, dtype=np.int64)) + id_name = utils.make_name("sparse_softmax_id") + id_output = utils.port_name(id_name) + controlflow.make_range(ctx, zero_const.output[0], indices_size.output[0], one_const.output[0], + id_output, id_name, shape=[-1], dtype=TensorProto.INT64) + id_unsqueeze = gb.make_unsqueeze({'data': id_output, "axes": [1]}, return_node=True) + indices_with_id = ctx.make_node("Concat", + [id_unsqueeze.output[0], indices_unsqueeze.output[0]], + attr={"axis": 1}) + log_softmax = ctx.make_node(op_type="LogSoftmax", + inputs=[logit_name], dtypes=[logit_dtype], shapes=[logit_shape]) + gathernd_name = utils.make_name("sparse_softmax_gathernd") + gathernd_output = utils.port_name(gathernd_name) + tensor.make_gathernd(ctx, log_softmax.output[0], indices_with_id.output[0], gathernd_output, + gathernd_name, logit_dtype, [logit_shape], [logit_dtype]) + const_name = utils.make_name("const_negative_one") + const_negative_one = ctx.make_const(const_name, np.array(-1).astype(utils.map_onnx_to_numpy_type(logit_dtype))) + mul2 = ctx.make_node(op_type="Mul", inputs=[const_negative_one.output[0], gathernd_output]) + shapes = node.output_shapes + dtypes = node.output_dtypes + ctx.remove_node(node.name) + gb = 
GraphBuilder(ctx) + gb.make_squeeze({'data': mul2.output[0], 'outputs': [node.output[0]], "axes": [1]}, + shapes=[shapes[0]], dtypes=[dtypes[0]]) + + +@tf_op("SoftmaxCrossEntropyWithLogits") +class SoftmaxCrossEntropyWithLogits: + @classmethod + def version_7(cls, ctx, node, **kwargs): + logits = node.inputs[0] + logit_dtype = ctx.get_dtype(logits.output[0]) + labels = node.inputs[1] + label_dtype = ctx.get_dtype(labels.output[0]) + if label_dtype != logit_dtype: + labels = ctx.make_node("Cast", labels.output, attr={"to": logit_dtype}, dtypes=[logit_dtype]) + + _make_softmax_cross_entropy_with_logits(ctx, labels, logits, node) + + +def _make_sparse_softmax_cross_entropy_with_logits(ctx, label, logit, tf_ori_node): + logit = logit.output[0] + label = label.output[0] + label_dtype = ctx.get_dtype(label) + logit_dtype = ctx.get_dtype(logit) + utils.make_sure(label_dtype == logit_dtype, "the following logic only works on same dtype of label and logit") + + # when label is onehot, logic "tf.multiply(-1, tf.reduce_sum(tf.multiply(label, log_softmax), axis=1))" is equal to + # "-log(q_i)" where i is the selected index specified by label, q_i = logic_i/sum, the detail process is as follows: + # logit_exp=exp(logit) >> sum = tf.reduce_sum(logit_exp, axis = -1), masked_sum = reduce_sum(mul(logit_exp, mul)) + # >> -log(masked_sum/sum) + logit_max = ctx.make_node(op_type="ReduceMax", inputs=[logit], attr={"axes": [-1], "keepdims": 1}).output[0] + logit_norm = ctx.make_node(op_type="Sub", inputs=[logit, logit_max]).output[0] + logit_exp = ctx.make_node(op_type="Exp", inputs=[logit_norm]).output[0] + logit_exp_sum = GraphBuilder(ctx).make_reduce_sum( + {"data": logit_exp, "axes": [-1], "keepdims": 0, "noop_with_empty_axes": 1}) + masked = ctx.make_node(op_type="Mul", inputs=[label, logit_exp]).output[0] + masked_sum = GraphBuilder(ctx).make_reduce_sum( + {"data": masked, "axes": [-1], "keepdims": 0, "noop_with_empty_axes": 1}) + probability = ctx.make_node(op_type="Div", 
inputs=[masked_sum, logit_exp_sum]).output[0] + log_prob = ctx.make_node(op_type="Log", inputs=[probability]).output[0] + const_negative_one = ctx.make_const(name=utils.make_name("const_negative_one"), + np_val=np.array(-1).astype(utils.ONNX_TO_NUMPY_DTYPE[logit_dtype])).output[0] + + shapes = tf_ori_node.output_shapes + dtypes = tf_ori_node.output_dtypes + ctx.remove_node(tf_ori_node.name) + ctx.make_node(op_type="Mul", inputs=[log_prob, const_negative_one], + outputs=[tf_ori_node.output[0]], shapes=[shapes[0]], dtypes=[dtypes[0]]) + + +@tf_op("SparseSoftmaxCrossEntropyWithLogits") +class SparseSoftmaxCrossEntropyWithLogits: + @classmethod + def version_7(cls, ctx, node, **kwargs): + # make subgraph to implement one_hot, idea comes from onehot_op + indices_name = node.input[1] + indices_shape = ctx.get_shape(indices_name) + if len(indices_shape) != 1: + # TODO: this works for rank=1 but tensorflow supports more than this. + # Same principle should work but we need to implement our own eye. 
+ raise ValueError("onehot op: only rank1 is supported") + logit_name = node.input[0] + depth = ctx.get_shape(logit_name)[-1] + # if number of classes is unknown or too large + if depth == utils.ONNX_UNKNOWN_DIMENSION or depth > 20000: + sparse_softmax_cross_entropy_with_logits_op_by_gathernd(ctx, node, **kwargs) + return + logit_dtype = ctx.get_dtype(logit_name) + utils.make_sure(logit_dtype, "Dtype of {} is None".format(logit_name)) + + dtype = utils.map_onnx_to_numpy_type(logit_dtype) + eye = np.eye(depth).astype(dtype) + const_name = utils.make_name("const_eye") + const_eye = ctx.make_const(name=const_name, np_val=eye) + onehot = ctx.make_node(op_type="Gather", inputs=[const_eye.output[0], indices_name], attr={"axis": 0}) + log_softmax = ctx.make_node(op_type="LogSoftmax", inputs=[logit_name]) + # implement tf.multiply(np.float32(-1.0), tf.reduce_sum(tf.multiply(one_hot, log_softmax), axis=1)) + mul1 = ctx.make_node(op_type="Mul", inputs=[onehot.output[0], log_softmax.output[0]]) + reduce_sum_output = GraphBuilder(ctx).make_reduce_sum( + {"data": mul1.output[0], "axes": [1], "keepdims": 1, "noop_with_empty_axes": 1}) + const_name = utils.make_name("const_negative_one") + const_negative_one = ctx.make_const(name=const_name, np_val=np.array(-1).astype(dtype)) + mul2 = ctx.make_node(op_type="Mul", inputs=[const_negative_one.output[0], reduce_sum_output]) + + shapes = node.output_shapes + dtypes = node.output_dtypes + ctx.remove_node(node.name) + ctx.make_node(op_type="Squeeze", inputs=[mul2.output[0]], outputs=[node.output[0]], attr={"axes": [1]}, + shapes=[shapes[0]], dtypes=[dtypes[0]]) + + @classmethod + def version_9(cls, ctx, node, **kwargs): + # float32/64 output = SparseSoftmaxCrossEntropyWithLogits(float32/64 features, int32/64 labels) + # the detail math process of this op is: a = onehot(labels), b = logsoftmax(features), reduce_sum(mul(a, b)) + logit_node = node.inputs[0] + logit_shape = ctx.get_shape(node.input[0]) + logit_dtype = 
ctx.get_dtype(node.input[0]) + + label_name = node.input[1] + + if logit_shape is not None and logit_shape[-1] != -1: + num_class = logit_shape[-1] + node_nme = utils.make_name("onehot_depth") + depth_node = ctx.make_const(node_nme, np.array([num_class]).astype(np.int64)).output[0] + else: + logit_shape = ctx.make_node("Shape", [node.input[0]]).output[0] + slice_args = {"data": logit_shape, + "starts": [-1], "ends": [int(utils.get_max_value(np.int32))]} + num_class = GraphBuilder(ctx).make_slice(kwargs=slice_args) + depth_node = num_class + values_node = ctx.make_const(utils.make_name("onehot_values"), np.array([0, 1]).astype(np.int64)).output[0] + label_dtype = ctx.get_dtype(label_name) + if label_dtype != TensorProto.INT64: + onehot_indice = ctx.make_node("Cast", [label_name], attr={"to": TensorProto.INT64}).output[0] + else: + onehot_indice = label_name + label_node = ctx.make_node(op_type="OneHot", + inputs=[onehot_indice, depth_node, values_node]) + # the above logic makes output dtype of label_node now always int64 + # make sure label has same dtype as logit + if logit_dtype != TensorProto.INT64: + label_node = ctx.make_node("Cast", label_node.output, attr={"to": logit_dtype}, dtypes=[logit_dtype]) + + _make_sparse_softmax_cross_entropy_with_logits(ctx, label_node, logit_node, node) diff --git a/lib/python3.10/site-packages/tf2onnx/onnx_opset/quantize.py b/lib/python3.10/site-packages/tf2onnx/onnx_opset/quantize.py new file mode 100644 index 0000000000000000000000000000000000000000..9e2ea00ccb8c87bb8c178528063d45c830e782ed --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/onnx_opset/quantize.py @@ -0,0 +1,89 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tensor +""" + +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import logging + +import numpy as np +from onnx.onnx_pb import TensorProto + +from tf2onnx import utils +from tf2onnx.handler import tf_op +from tf2onnx.utils import 
@tf_op(["FakeQuantWithMinMaxArgs", "FakeQuantWithMinMaxVars"])
class FakeQuantWithMinMaxArgs:
    # see https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/fake-quant-with-min-max-args
    @classmethod
    def version_10(cls, ctx, node, **kwargs):
        """Rewrite TF fake-quantization as a QuantizeLinear -> DequantizeLinear pair.

        For FakeQuantWithMinMaxVars the (min, max) range comes from const scalar
        inputs 1 and 2; for FakeQuantWithMinMaxArgs it comes from the node's
        ``min``/``max`` attributes. Only num_bits == 8 with narrow_range disabled
        is supported; anything else raises via make_sure.
        """
        # FakeQuantWithMinMaxVars carries the range as graph inputs; they must be
        # const scalars so we can fold them into the Q/DQ scale and zero point.
        if node.type == "FakeQuantWithMinMaxVars":
            utils.make_sure(node.inputs[1].is_scalar(), "%s node %s requires const scalar value for min",
                            node.type, node.name)
            utils.make_sure(node.inputs[2].is_scalar(), "%s node %s requires const scalar value for max",
                            node.type, node.name)
            amin = node.inputs[1].get_tensor_value()
            amax = node.inputs[2].get_tensor_value()
        else:
            amin = node.get_attr("min").f
            amax = node.get_attr("max").f
        narrow_range = node.get_attr("narrow_range").i
        num_bits = node.get_attr("num_bits").i

        make_sure(
            not narrow_range,
            "Unable to convert node FakeQuantWithMinMaxArgs with narrow_range=%r",
            narrow_range)
        make_sure(
            num_bits == 8,
            "Unable to convert node FakeQuantWithMinMaxArgs with "
            "num_bits=%r", num_bits)

        # Uniform affine quantization: one step of the uint8 grid covers
        # (amax - amin) / 255; min_adj is the zero point in quantized units.
        scale = (amax - amin) / (2 ** num_bits - 1)
        min_adj = np.around(amin / scale)

        dtype = ctx.get_dtype(node.input[0])
        shape = ctx.get_shape(node.input[0])
        axis = 1
        idtype = TensorProto.UINT8

        pb_scale = ctx.make_const(
            utils.make_name("{}_scaley".format(node.name)),
            np.array(scale, dtype=np.float32))
        # Round-trip check: the negated zero point must survive the uint8 cast,
        # otherwise the requested range cannot be represented.
        zero = np.array(-min_adj, dtype=np.uint8)
        make_sure(
            zero == -min_adj,
            "Cannot convert %s node %s with "
            "min=%r max=%r numbits=%r because zero_scale=%r "
            "is outside uint8 boundary",
            node.type, node.name, amin, amax, num_bits, -min_adj)
        zero_point = ctx.make_const(
            utils.make_name("{}_zpy".format(node.name)), zero)

        # NOTE(review): 'axis' is passed to QuantizeLinear/DequantizeLinear even
        # though this handler registers for opset 10, where the schema has no
        # axis attribute (per-axis support arrived in opset 13) — confirm intent.
        new_node = ctx.make_node(
            "QuantizeLinear", [node.input[0], pb_scale.name, zero_point.name],
            op_name_scope=node.name, attr={"axis": axis},
            shapes=[shape], dtypes=[idtype])
        output_name = new_node.output[0]
        ctx.replace_input(node, node.input[0], output_name, 0)

        ctx.remove_node(node.name)

        last_node = ctx.make_node(
            "DequantizeLinear", [new_node.output[0], pb_scale.name, zero_point.name],
            op_name_scope=node.name, attr={"axis": axis},
            shapes=[shape], dtypes=[dtype])
        # Re-route every consumer of the old fake-quant output to the DQ output.
        ctx.replace_all_inputs(node.output[0], last_node.output[0])  # ops=ctx.get_nodes()
@tf_op(["ArgMax", "ArgMin"])
class ArgMax:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Convert TF ArgMax/ArgMin to the ONNX op of the same name.

        TF supplies the reduction dimension as a tensor input and may request
        int32 or int64 output; ONNX takes the axis as an attribute and always
        produces int64, so the axis input is folded into an attribute and a
        Cast back to int32 is appended when TF asked for int32.
        """
        # output_type output = ArgMin(T input, Tidx dimension, @type Tidx, @type output_type)
        # tensor(int32) reduced = ArgMin(T data, @INT axis, @INT keepdims)
        axis_node = node.inputs[1]
        axis = axis_node.get_tensor_value()
        if axis < 0:
            # ONNX ArgMax/ArgMin support for negative axis is not explicit in
            # older opsets, so normalize against the input rank here.
            # NOTE(review): when the shape is unknown, dim_count falls back to 0
            # and the axis stays negative — confirm callers never hit this case.
            input_shape = ctx.get_shape(node.input[0])
            dim_count = len(input_shape) if input_shape else 0
            axis = dim_count + axis

        # TF ArgMin/ArgMax may return int32 or int64
        # Onnx ArgMin/ArgMax only supports int64 output, add cast if needed
        if node.get_attr_int("output_type") == onnx_pb.TensorProto.INT32:
            # current node will return int64 after conversion, which differs from previous dtype got from tf
            ctx.set_dtype(node.output[0], onnx_pb.TensorProto.INT64)
            op_name = utils.make_name("Cast")
            cast_node = ctx.insert_new_node_on_output("Cast", node.output[0], name=op_name,
                                                      to=onnx_pb.TensorProto.INT32)
            ctx.set_dtype(cast_node.output[0], onnx_pb.TensorProto.INT32)
            ctx.copy_shape(node.output[0], cast_node.output[0])

        # Fold the dimension input into attributes; TF ArgMax/ArgMin never keep
        # the reduced dimension, hence keepdims=0.
        node.set_attr("axis", axis)
        node.set_attr("keepdims", 0)
        ctx.remove_input(node, node.input[1], 1)

    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # Opset 11 supports negative axis, but core logic is the same.
        cls.version_1(ctx, node, **kwargs)

    @classmethod
    def version_12(cls, ctx, node, **kwargs):
        # Opset 12 adds extra attribute 'select_last_index'; no changes needed.
        cls.version_1(ctx, node, **kwargs)
+ if np.isscalar(reduce_dim): + reduce_dim = [reduce_dim] + + if ctx.opset < 11: + utils.make_sure(all(i >= 0 for i in reduce_dim), "negative reduce axis is not supported in onnx for now") + + cast = ctx.make_node(op_type="Cast", inputs=[node.input[0]], attr={"to": onnx_pb.TensorProto.FLOAT}) + keepdims = helper.get_attribute_value(node.get_attr("keep_dims")) + op_type = "ReduceMin" if node.type == "All" else "ReduceSum" + + if op_type == "ReduceSum": + reduce_node_output = GraphBuilder(ctx).make_reduce_sum( + {"data": cast.output[0], "axes": reduce_dim, "keepdims": keepdims, "noop_with_empty_axes": 1}) + else: + reduce_node_output = ctx.make_node(op_type=op_type, inputs=cast.output, + attr={"axes": reduce_dim, "keepdims": keepdims}).output[0] + + zero_node = ctx.make_const(utils.make_name("zero_reduce"), np.array(0, dtype=np.float32)) + + shapes = node.output_shapes + dtypes = node.output_dtypes + ctx.remove_node(node.name) + ctx.make_node(op_type="Greater", inputs=[reduce_node_output, zero_node.output[0]], + name=node.name, outputs=node.output, shapes=shapes, dtypes=dtypes) + + +@tf_op("AddN") +class AddN(): + @classmethod + def version_6(cls, ctx, node, **kwargs): + node.type = "Sum" + + +@tf_op(["SegmentSum", "SegmentProd", "SegmentMax", "SegmentMin", "SegmentMean", + "SparseSegmentSum", "SparseSegmentMean", "SparseSegmentSqrtN", + "SparseSegmentSumWithNumSegments", "SparseSegmentMeanWithNumSegments", "SparseSegmentSqrtNWithNumSegments", + "UnsortedSegmentSum", "UnsortedSegmentProd", "UnsortedSegmentMax", "UnsortedSegmentMin"]) +class SegmentSum(): + @classmethod + def any_version(cls, opset, ctx, node, **kwargs): + node_inputs = node.input + num_segments_specified = False + if node.type.endswith("WithNumSegments") or node.type.startswith("Unsorted"): + num_segments_specified = True + num_segments = node_inputs.pop() + node.type = node.type.replace("WithNumSegments", "") + node.type = node.type.replace("Unsorted", "") + if node.type.startswith("Sparse"): + 
    @classmethod
    def any_version(cls, opset, ctx, node, **kwargs):
        """Convert the whole TF segment-op family to ONNX.

        Normalizes the node type (strips Sparse/Unsorted/WithNumSegments
        variants), builds a one-hot segment-membership matrix, then reduces:
        sum-like ops go through a fast MatMul path, min/max/prod go through a
        Where + reduce path with a per-op identity fill value.
        """
        node_inputs = node.input
        num_segments_specified = False
        # Unsorted*/​*WithNumSegments variants carry the segment count as a
        # trailing input; pop it off and fall through to the base op name.
        if node.type.endswith("WithNumSegments") or node.type.startswith("Unsorted"):
            num_segments_specified = True
            num_segments = node_inputs.pop()
            node.type = node.type.replace("WithNumSegments", "")
            node.type = node.type.replace("Unsorted", "")
        # Sparse variants first gather the selected rows, then behave like the
        # dense op on the gathered data.
        if node.type.startswith("Sparse"):
            data_inp, indices_inp, segment_inp = node_inputs
            gather_node = ctx.make_node("Gather", [data_inp, indices_inp], attr={'axis': 0})
            data_inp = gather_node.output[0]
            node.type = node.type.replace("Sparse", "")
        else:
            data_inp, segment_inp = node_inputs

        # Data has shape [n, a, b, ..., c]
        data_shape = ctx.get_shape(data_inp)
        data_rank = len(data_shape) if data_shape is not None else None
        data_dtype = ctx.get_dtype(data_inp)
        seg_rank = ctx.get_rank(segment_inp)
        utils.make_sure(seg_rank == 1, "Segment ops only supported for segments of rank 1, not %s", seg_rank)
        data_np_dtype = utils.map_onnx_to_numpy_type(data_dtype)
        seg_np_dtype = utils.map_onnx_to_numpy_type(ctx.get_dtype(segment_inp))

        # OneHot requires indices and depth to share a dtype.
        if num_segments_specified and ctx.get_dtype(segment_inp) != ctx.get_dtype(num_segments):
            num_segments = ctx.make_node("Cast", [num_segments], attr={"to": ctx.get_dtype(segment_inp)}).output[0]

        data_is_float = np.dtype(data_np_dtype).kind == 'f'
        data_is_int = np.dtype(data_np_dtype).kind == 'i'
        utils.make_sure(data_is_float or data_is_int, "dtype for Segment ops must be float or int")

        # Pick the ONNX reduce op and the identity element used to fill
        # positions outside a segment (so they don't affect the reduction).
        if node.type in ["SegmentSum", "SegmentMean", "SegmentSqrtN"]:
            onnx_op = "ReduceSum"
            identity_value = np.array(0, dtype=data_np_dtype)
        elif node.type == "SegmentProd":
            onnx_op = "ReduceProd"
            identity_value = np.array(1, dtype=data_np_dtype)
        elif node.type == "SegmentMax":
            onnx_op = "ReduceMax"
            if data_is_float:
                identity_value = np.array('-inf', dtype=data_np_dtype)
            else:
                identity_value = np.iinfo(data_np_dtype).min
        elif node.type == "SegmentMin":
            onnx_op = "ReduceMin"
            if data_is_float:
                identity_value = np.array('inf', dtype=data_np_dtype)
            else:
                identity_value = np.iinfo(data_np_dtype).max

        if not num_segments_specified:
            # Sorted segment ops imply the count: max segment id + 1.
            max_segment = ctx.make_node("ReduceMax", [segment_inp], attr={'axes': [0], 'keepdims': 0})
            one_const = ctx.make_const(utils.make_name("const_one"), np.array(1, dtype=seg_np_dtype))
            num_segments = ctx.make_node("Add", [max_segment.output[0], one_const.output[0]]).output[0]
        # ORT doesn't support bool for OneHot so we use float32 and cast to bool
        onehot_values = ctx.make_const(utils.make_name("onehot_values"), np.array([0, 1], dtype=np.float32))
        # one_hot_node has shape [s, n] (s is # segments)
        one_hot_node = ctx.make_node("OneHot", [segment_inp, num_segments, onehot_values.output[0]],
                                     attr={'axis': 0})
        # Mean/SqrtN divide the segment sums by the per-segment counts
        # (resp. their square roots), computed by summing the one-hot rows.
        if node.type == "SegmentMean":
            scaling_node_output = GraphBuilder(ctx).make_reduce_sum(
                {"data": one_hot_node.output[0], "axes": [1], "keepdims": 0, "noop_with_empty_axes": 1})
        elif node.type == "SegmentSqrtN":
            seg_cnts_node_output = GraphBuilder(ctx).make_reduce_sum(
                {"data": one_hot_node.output[0], "axes": [1], "keepdims": 0, "noop_with_empty_axes": 1})
            scaling_node_output = ctx.make_node("Sqrt", [seg_cnts_node_output]).output[0]
        else:
            scaling_node_output = None

        if scaling_node_output is not None and num_segments_specified:
            # If empty segments are possible, we must avoid division by zero
            const_one_float = ctx.make_const(utils.make_name("const_one_float"), np.array(1, dtype=np.float32))
            scaling_node_output = ctx.make_node("Max", [scaling_node_output, const_one_float.output[0]]).output[0]

        if onnx_op == "ReduceSum":
            # If the op is a summation, we can use MatMul instead of Where, which is faster

            # Data shape is [n, a, b, ..., c]
            data_shape_node = ctx.make_node("Shape", [data_inp])
            new_shape = ctx.make_const(utils.make_name("reshape_const"), np.array([0, -1], dtype=np.int64))
            # Reshape the data from [n, a, b, ..., c] to [n, P]
            data_reshape = ctx.make_node("Reshape", [data_inp, new_shape.output[0]])

            one_hot_cast = one_hot_node
            if data_dtype != onnx_pb.TensorProto.FLOAT:
                one_hot_cast = ctx.make_node("Cast", [one_hot_node.output[0]], attr={'to': data_dtype})

            # Shapes [s, n] * [n, P] => [s, P]
            product = ctx.make_node("MatMul", [one_hot_cast.output[0], data_reshape.output[0]], op_name_scope=node.name)
            if scaling_node_output is not None:
                scaling_node_unsqueeze = GraphBuilder(ctx).make_unsqueeze(
                    {'data': scaling_node_output, 'axes': [1]}, return_node=True)
                product = ctx.make_node("Div", [product.output[0], scaling_node_unsqueeze.output[0]])

            # Create new shape [0, a, b, ..., c]
            max_int64 = int(utils.get_max_value(np.int64))
            new_shape_slice = GraphBuilder(ctx).make_slice(
                {"data": data_shape_node.output[0], "ends": [max_int64], "starts": [1], "axes": [0]})
            zero_const = ctx.make_const(utils.make_name("zero_const"), np.array([0], dtype=np.int64))
            new_shape = ctx.make_node("Concat", [zero_const.output[0], new_shape_slice], attr={'axis': 0})

            shapes = node.output_shapes
            dtypes = node.output_dtypes
            ctx.remove_node(node.name)
            # Reshape result from [s, P] to [s, a, b, ..., c]
            ctx.make_node("Reshape", [product.output[0], new_shape.output[0]],
                          name=node.name, outputs=node.output, shapes=shapes, dtypes=dtypes)
            return

        # Non-sum path: broadcast a boolean membership mask against the data
        # and substitute the identity value outside each segment.
        identity_const = ctx.make_const(utils.make_name("const_identity"), identity_value)
        one_hot_bool = ctx.make_node("Cast", [one_hot_node.output[0]], attr={"to": onnx_pb.TensorProto.BOOL})
        one_hot_unsqueeze = one_hot_bool

        # Make one_hot_unsqueeze have shape [s, n, 1, 1, ..., 1]
        if data_rank is None:
            # Unsqueeze requires known rank, but we can use Reshape if rank is unknown
            shape_node = ctx.make_node("Shape", [data_inp])
            rank_node = ctx.make_node("Shape", [shape_node.output[0]])
            one_const_int64 = ctx.make_const(utils.make_name("const_one"), np.array([1], dtype=np.int64))
            num_unsqueeze_dims = ctx.make_node("Sub", [rank_node.output[0], one_const_int64.output[0]])

            one_tensor = helper.make_tensor("value", onnx_pb.TensorProto.INT64, dims=[1], vals=[1])
            unsqueeze_dims = ctx.make_node("ConstantOfShape", inputs=[num_unsqueeze_dims.output[0]],
                                           attr={"value": one_tensor})
            # Zero indicates a dimension should be unchanged
            double_zero_const = ctx.make_const(utils.make_name("double_zero"), np.array([0, 0], dtype=np.int64))
            expanded_shape = ctx.make_node("Concat", [double_zero_const.output[0], unsqueeze_dims.output[0]],
                                           attr={'axis': 0})
            one_hot_unsqueeze = ctx.make_node("Reshape", [one_hot_bool.output[0], expanded_shape.output[0]])
        elif data_rank > 1:
            new_dims = list(range(2, 2 + data_rank - 1))
            one_hot_unsqueeze = GraphBuilder(ctx).make_unsqueeze(
                {'data': one_hot_bool.output[0], 'axes': new_dims}, return_node=True)

        # Shape of data: [n, a, b, ..., c]
        # Shape of one_hot: [s, n, 1, 1, ..., 1]
        # Broadcast left-pads shape with 1s, so result is shape: [s, n, a, b, ..., c]
        where_node = ctx.make_node("Where", [one_hot_unsqueeze.output[0], data_inp, identity_const.output[0]])

        shapes = node.output_shapes
        dtypes = node.output_dtypes
        ctx.remove_node(node.name)
        # After reduction over axis 1, shape is: [s, a, b, ..., c]
        ctx.make_node(onnx_op, [where_node.output[0]], attr={'axes': [1], 'keepdims': 0},
                      name=node.name, outputs=node.output, shapes=shapes, dtypes=dtypes)
@tf_op("LSTMBlockCell")
class LSTMBlockCell:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Expand TF LSTMBlockCell into primitive ONNX ops.

        Inputs (in order): x, cs_prev, h_prev, w, wci, wcf, wco, b.
        Attributes: forget_bias, cell_clip (negative disables clipping),
        use_peephole. Produces the seven TF outputs (i, cs, f, o, ci, co, h)
        by rewiring each one to the matching constructed subgraph:

        ```python
        xh = [x, h_prev]
        [i, ci, f, o] = xh * w + b
        f = f + forget_bias
        if not use_peephole:
            wci = wcf = wco = 0
        i = sigmoid(cs_prev .* wci + i)
        f = sigmoid(cs_prev .* wcf + f)
        ci = tanh(ci)
        cs = ci .* i + cs_prev .* f
        cs = clip(cs, cell_clip)
        o = sigmoid(cs * wco + o)
        co = tanh(cs)
        h = co .* o
        ```
        """
        nodes = []
        x, cs_prev, h_prev, w, wci, wcf, wco, b = node.input
        forget_bias = float(node.get_attr("forget_bias").f)
        cell_clip = float(node.get_attr("cell_clip").f)
        use_peephole = bool(node.get_attr("use_peephole").i)

        def make_sigmoid(i, w, b):
            # sigmoid(i .* w + b) — the shared gate pattern used below.
            i_w_node = ctx.make_node("Mul", [i, w])
            i_w_b_node = ctx.make_node("Add", [i_w_node.output[0], b])
            output_node = ctx.make_node("Sigmoid", [i_w_b_node.output[0]])
            nodes.extend([i_w_node, i_w_b_node, output_node])
            return output_node.output[0]

        # xh = [x, h]
        xh_node = ctx.make_node("Concat", [x, h_prev], attr={"axis": 1})

        # i, ci, f, o = xh * w + b
        xh_w_node = ctx.make_node("MatMul", [xh_node.output[0], w])
        w_shape = ctx.get_shape(w)
        # W packs the four gates side by side, so its last dim must be 4 * units.
        if len(w_shape) != 2 or w_shape[1] % 4 != 0:
            raise RuntimeError("shape of W of LSTMBlockCell {} should be times of 4".format(node.name))
        merged_output_node = ctx.make_node("Add", [xh_w_node.output[0], b])
        w_last_dim = int(w_shape[1] / 4)
        split_output_node = ctx.make_node(
            "Split", [merged_output_node.output[0]],
            attr={"axis": 1},
            output_count=4
        )
        i, ci, f, o = split_output_node.output

        # f = f + forget_bias
        forget_bias_const = ctx.make_const(
            utils.make_name("{}__forget_bias".format(node.name)),
            np.array(forget_bias, dtype=np.float32)
        )
        f_node = ctx.make_node("Add", [f, forget_bias_const.output[0]])

        if not use_peephole:
            # Without peepholes the cell-state contributions vanish; share one
            # zero vector for all three peephole weights.
            zeros_const = ctx.make_const(
                utils.make_name("{}__zeros_const".format(node.name)),
                np.zeros([w_last_dim], dtype=np.float32)
            )
            nodes.append(zeros_const)
            wci = zeros_const.output[0]
            wcf = zeros_const.output[0]
            wco = zeros_const.output[0]

        # i = sigmoid(cs_prev .* wci + i)
        i = make_sigmoid(cs_prev, wci, i)
        # f = sigmoid(cs_prev .* wcf + f)
        f = make_sigmoid(cs_prev, wcf, f_node.output[0])
        # ci = Tanh(ci)
        ci_node = ctx.make_node("Tanh", [ci])
        # cs = ci .* i + f .* cs_prev
        ci_i_node = ctx.make_node("Mul", [ci_node.output[0], i])
        cs_prev_f_node = ctx.make_node("Mul", [cs_prev, f])
        cs_node = ctx.make_node("Add", [ci_i_node.output[0], cs_prev_f_node.output[0]])
        cs = cs_node.output[0]
        # cs = clip(cs); only when cell_clip is positive (negative disables it).
        if cell_clip > 0:
            if ctx.opset < 11:
                # Opset < 11 Clip takes min/max as attributes.
                cs_clip_node = ctx.make_node("Clip", [cs], attr={"max": cell_clip, "min": -cell_clip})
                nodes.append(cs_clip_node)
                cs = cs_clip_node.output[0]
            else:
                # Opset >= 11 Clip takes min/max as tensor inputs of the data dtype.
                dtype = utils.map_onnx_to_numpy_type(ctx.get_dtype(cs))
                name_min = utils.make_name("{}_min".format(node.name))
                name_max = utils.make_name("{}_max".format(node.name))
                min_const = ctx.make_const(name_min, np.array(-cell_clip, dtype=dtype))
                max_const = ctx.make_const(name_max, np.array(cell_clip, dtype=dtype))
                cs_clip_node = ctx.make_node('Clip', [cs, min_const.output[0], max_const.output[0]])
                nodes.append(cs_clip_node)
                cs = cs_clip_node.output[0]

        # o = sigmoid(cs .* wco + o)
        o = make_sigmoid(cs, wco, o)
        # co = Tanh(cs)
        co_node = ctx.make_node("Tanh", [cs])
        # h = co .* o
        h_node = ctx.make_node("Mul", [co_node.output[0], o])

        def replace_output(old_output, new_output):
            # Rewire consumers and carry over dtype/shape metadata.
            ctx.replace_all_inputs(old_output, new_output)  # ops=ctx.get_nodes()
            ctx.copy_dtype(old_output, new_output)
            ctx.copy_shape(old_output, new_output)

        replace_output(node.output[0], i)
        replace_output(node.output[1], cs)
        replace_output(node.output[2], f)
        replace_output(node.output[3], o)
        replace_output(node.output[4], ci_node.output[0])
        replace_output(node.output[5], co_node.output[0])
        replace_output(node.output[6], h_node.output[0])

    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        # Same lowering works unchanged at opset 7.
        cls.version_1(ctx, node, **kwargs)
@tf_op("CudnnRNN")
class CudnnRNN:
    @classmethod
    def version_10(cls, ctx, node, **kwargs):
        """Convert TF CudnnRNN (GRU mode only) to a stack of ONNX GRU nodes.

        Only rnn_mode=gru, dropout=0 and input_mode=linear_input are accepted.
        The packed parameter blob (input 3) is sliced into W, R and B pieces,
        reshaped per layer/direction, and fed to one GRU node per
        layer-direction; layer outputs are squeezed and chained. The final
        hidden states are concatenated into output 1.
        """
        x = node.input[0]
        x_shape = ctx.get_shape(x)
        h = node.input[1]
        h_shape = ctx.get_shape(h)
        p = node.input[3]
        utils.make_sure(
            node.attr["rnn_mode"].s == b"gru",
            "rnn mode other than gru are not supported yet"
        )
        utils.make_sure(
            node.attr["dropout"].f == 0,
            "dropout not supported yet"
        )
        utils.make_sure(
            node.attr["input_mode"].s == b"linear_input",
            "input mode must be linear input"
        )
        num_dirs = 1 if node.attr["direction"].s == b"unidirectional" else 2
        num_layers = int(h_shape[0] / num_dirs)
        num_units = hidden_size = h_shape[2]
        input_size = x_shape[2]
        # Per-layer/direction parameter shapes for a 3-gate GRU:
        # W [*, 3H, I], R [*, 3H, H], B [*, 6H] (input + recurrent biases).
        # NOTE(review): assumes the cuDNN blob is laid out W-then-R-then-B with
        # matching gate order — confirm against the cuDNN canonical layout.
        w_shape = [num_layers * num_dirs, 3 * hidden_size, input_size]
        w_shape_const = ctx.make_const(utils.make_name("w_shape"), np.array(w_shape, dtype=np.int64))
        r_shape = [num_layers * num_dirs, 3 * hidden_size, hidden_size]
        r_shape_const = ctx.make_const(utils.make_name("r_shape"), np.array(r_shape, dtype=np.int64))
        b_shape = [num_layers * num_dirs, 6 * hidden_size]
        b_shape_const = ctx.make_const(utils.make_name("b_shape"), np.array(b_shape, dtype=np.int64))
        zero_const = ctx.make_const(utils.make_name("zero"), np.array([0], dtype=np.int64))
        # Running offsets of each section inside the flat parameter blob.
        w_end = np.prod(w_shape)
        w_end_const = ctx.make_const(utils.make_name("w_end"), np.array([w_end], dtype=np.int64))
        r_end = w_end + np.prod(r_shape)
        r_end_const = ctx.make_const(utils.make_name("r_end"), np.array([r_end], dtype=np.int64))
        b_end = r_end + np.prod(b_shape)
        b_end_const = ctx.make_const(utils.make_name("b_end"), np.array([b_end], dtype=np.int64))

        def name(nm):
            # Scope generated tensor names under this node's name.
            return node.name + "_" + nm

        ws = [name('W_' + str(i)) for i in range(num_layers * num_dirs)]
        rs = [name('R_' + str(i)) for i in range(num_layers * num_dirs)]
        bs = [name('B_' + str(i)) for i in range(num_layers * num_dirs)]
        hs = [name('H_' + str(i)) for i in range(num_layers * num_dirs)]
        yhs = [name('YH_' + str(i)) for i in range(num_layers * num_dirs)]
        # Slice the flat blob into its W / R / B sections, reshape, and split
        # one slice per layer-direction.
        w_flattened = ctx.make_node('Slice', [p, zero_const.output[0], w_end_const.output[0]])
        r_flattened = ctx.make_node('Slice', [p, w_end_const.output[0], r_end_const.output[0]])
        b_flattened = ctx.make_node('Slice', [p, r_end_const.output[0], b_end_const.output[0]])
        w = utils.make_name('W')
        r = utils.make_name('R')
        b = utils.make_name('B')
        ctx.make_node('Reshape', [w_flattened.output[0], w_shape_const.output[0]], outputs=[w])
        ctx.make_node('Reshape', [r_flattened.output[0], r_shape_const.output[0]], outputs=[r])
        ctx.make_node('Reshape', [b_flattened.output[0], b_shape_const.output[0]], outputs=[b])
        ctx.make_node('Split', [w], outputs=ws)
        ctx.make_node('Split', [r], outputs=rs)
        ctx.make_node('Split', [b], outputs=bs)
        ctx.make_node('Split', [h], outputs=hs)

        builder = GraphBuilder(ctx)

        # Chain one GRU per layer; for bidirectional runs, forward and reverse
        # directions are built as separate single-direction GRU nodes.
        xnf = xnb = x
        for i in range(num_layers):
            suffix = '_' + str(i * num_dirs)
            ctx.make_node('GRU',
                          [xnf, name('W' + suffix), name('R' + suffix), name('B' + suffix), '', name('H' + suffix)],
                          outputs=[name('Y' + suffix), name('YH' + suffix)],
                          attr={'direction': 'forward', 'hidden_size': num_units})
            xnf = name(x + suffix)
            # GRU's Y output carries a num_directions axis; squeeze it before
            # feeding the next layer.
            builder.make_squeeze({'data': name('Y' + suffix), 'outputs': [xnf], 'axes': [1]})
            if num_dirs == 2:
                suffix = '_' + str(i * 2 + 1)
                ctx.make_node('GRU',
                              [xnb, name('W' + suffix), name('R' + suffix), name('B' + suffix), '', name('H' + suffix)],
                              outputs=[name('Y' + suffix), name('YH' + suffix)],
                              attr={'direction': 'reverse', 'hidden_size': num_units})
                xnb = name(x + suffix)
                builder.make_squeeze({'data': name('Y' + suffix), 'outputs': [xnb], 'axes': [1]})
        ctx.remove_node(node.name)
        if num_dirs == 2:
            ctx.make_node('Concat', [xnf, xnb], outputs=[node.output[0]], attr={'axis': -1})
        else:
            ctx.make_node('Identity', [xnf], outputs=[node.output[0]])
        # Stack all per-layer/direction final hidden states into output 1.
        ctx.make_node('Concat', yhs, outputs=[node.output[1]], attr={'axis': 0})

    @classmethod
    def version_13(cls, ctx, node, **kwargs):
        # Squeeze changed in Opset 13 (axes became an input); GraphBuilder
        # handles the difference, so the opset-10 logic is reused as-is.
        cls.version_10(ctx, node, **kwargs)
+ cls.version_10(ctx, node, **kwargs) diff --git a/lib/python3.10/site-packages/tf2onnx/onnx_opset/signal.py b/lib/python3.10/site-packages/tf2onnx/onnx_opset/signal.py new file mode 100644 index 0000000000000000000000000000000000000000..2bc8f5ae4d945ad09f11a93ce880d2e6f65d0c41 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/onnx_opset/signal.py @@ -0,0 +1,312 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +signal +""" + +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import logging + +import numpy as np +from onnx import onnx_pb, helper +from onnx.numpy_helper import to_array +from tf2onnx import utils +from tf2onnx.handler import tf_op +from tf2onnx.graph_builder import GraphBuilder + +logger = logging.getLogger(__name__) + + +# pylint: disable=unused-argument,missing-docstring + +def make_dft_constant(length, dtype, fft_length): + n = np.arange(length) + k = n.reshape((length, 1)).astype(np.float64) + mat = np.exp(-2j * np.pi * k * n / length) + mat = mat[:fft_length // 2 + 1] + both = np.empty((2,) + mat.shape, dtype=dtype) + both[0, :, :] = np.real(mat) + both[1, :, :] = np.imag(mat) + return both + + +class CommonFFTOp: + @classmethod + def any_version(cls, const_length, opset, ctx, node, **kwargs): + """ + Inspired from `Python implementation of RFFT + `_. 
+ + Complex version: + + :: + + import numpy as np + + def _DFT_cst(N, fft_length): + n = np.arange(N) + k = n.reshape((N, 1)).astype(np.float64) + M = np.exp(-2j * np.pi * k * n / N) + return M[:fft_length // 2 + 1] + + def DFT(x, fft_length=None): + if len(x.shape) == 1: + x = x.reshape((-1, 1)) + else: + x = x.T + if fft_length is None: + fft_length = x.shape[0] + cst = _DFT_cst(x.shape[0], fft_length) + return np.dot(cst, x).T + + Real version, first axis is (real, imag) part: + + :: + + import numpy as np + + def _DFT_real_cst(N, fft_length): + n = np.arange(N) + k = n.reshape((N, 1)).astype(np.float64) + M = np.exp(-2j * np.pi * k * n / N) + M = M[:fft_length // 2 + 1] + both = np.empty((2,) + M.shape) + both[0, :, :] = np.real(M) + both[1, :, :] = np.imag(M) + return both + + def DFT_real(x, fft_length=None): + if len(x.shape) == 1: + x = x.reshape((-1, 1)) + else: + x = x.T + if fft_length is None: + fft_length = x.shape[0] + cst = _DFT_real_cst(x.shape[0], fft_length) + res = np.dot(cst, x) + return np.transpose(res, (0, 2, 1)) + """ + supported_dtypes = [ + onnx_pb.TensorProto.FLOAT, + onnx_pb.TensorProto.FLOAT16, + onnx_pb.TensorProto.DOUBLE, + onnx_pb.TensorProto.COMPLEX64, + onnx_pb.TensorProto.COMPLEX128, + ] + consumers = ctx.find_output_consumers(node.output[0]) + consumer_types = set(op.type for op in consumers) + utils.make_sure( + consumer_types == {'ComplexAbs'}, + "Current implementation of RFFT or FFT only allows ComplexAbs as consumer not %r", + consumer_types) + + input_name = node.input[0] + onnx_dtype = ctx.get_dtype(input_name) + utils.make_sure(onnx_dtype in supported_dtypes, "Unsupported input type.") + shape = ctx.get_shape(node.input[0]) + shape_n = shape[-1] + + if onnx_dtype in (onnx_pb.TensorProto.COMPLEX64, onnx_pb.TensorProto.COMPLEX128): + parent = ctx.get_node_by_output_in_current_graph(node.input[0]) + utils.make_sure( + parent.type == 'Cast' and parent.get_attr_value('to') == onnx_dtype, + "Current implementation of FFT or 
RFFT assumes the input is real or complex produced " + "by a node Cast just before this one.") + input_name = parent.input[0] + onnx_dtype = ctx.get_dtype(input_name) + + np_dtype = utils.map_onnx_to_numpy_type(onnx_dtype) + + if np_dtype == np.float16: + res_onnx_dtype = utils.map_numpy_to_onnx_dtype(np.float16) + np_dtype = np.float16 + elif np_dtype in (np.float32, np.complex64): + res_onnx_dtype = utils.map_numpy_to_onnx_dtype(np.float32) + np_dtype = np.float32 + else: + res_onnx_dtype = utils.map_numpy_to_onnx_dtype(np.float64) + np_dtype = np.float64 + + if const_length: + # RFFT: length of FFT is known, some computation + # (see function make_dft_constant) + # can be done at conversion time and stored as constant + utils.make_sure(len(node.input) == 2, "Two inputs expected not %r", len(node.input)) + + # This input should be a constant. + fft_length_name = node.input[1] + node_fft_length = ctx.get_node_by_output(fft_length_name, search_in_parent_graphs=True) + utils.make_sure(node_fft_length.type == 'Const', + "fft_length should be a constant, the other case is not implemented yet.") + value = node_fft_length.get_attr("value") + value_array = to_array(value.t) + utils.make_sure(value_array.shape == (1,), "Unexpected shape for fft_length (%r)", value_array.shape) + fft_length = value_array[0] + + # TODO: handle this parameter when onnx.helper.make_node is fixed. + # Tcomplex = node.get_attr("Tcomplex") + + real_imag_part = make_dft_constant(shape_n, np_dtype, fft_length) + onx_real_imag_part = ctx.make_const( + name=utils.make_name('cst_rfft_%d' % shape_n), np_val=real_imag_part) + onx_real_imag_part_name = onx_real_imag_part.name + else: + # FFT: length of FFT is unknown, the matrix + # created by function make_dft_constant must be + # done in ONNX. 
+ dyn_shape_all = ctx.make_node("Shape", inputs=[input_name], + name=utils.make_name('CPLX_' + node.name + 'shape')) + m1_cst = ctx.make_const(name=utils.make_name('CPLX_m1'), np_val=np.array([-1], dtype=np.int64)) + dyn_shape = ctx.make_node('Gather', inputs=[dyn_shape_all.output[0], m1_cst.name]) + one_tensor = helper.make_tensor("value", res_onnx_dtype, dims=[1], vals=[1]) + cst_1 = ctx.make_node("ConstantOfShape", inputs=[dyn_shape.output[0]], attr={"value": one_tensor}) + just_0 = ctx.make_const(name=utils.make_name('CPLX1'), np_val=np.array([0], dtype=np.int64)) + rng1 = ctx.make_node("CumSum", inputs=[cst_1.output[0], just_0.name], + name=utils.make_name('CPLX_' + node.name + 'range')) + p1_cst = ctx.make_const(name=utils.make_name('CPLX_p1'), np_val=np.array([1], dtype=np_dtype)) + rng = ctx.make_node("Sub", inputs=[rng1.output[0], p1_cst.name], + name=utils.make_name('CPLX_' + node.name + 'range')) + resh_cst = ctx.make_const(name=utils.make_name('CPLX_reshape'), np_val=np.array([1, -1], dtype=np.int64)) + rng_tr1 = ctx.make_node("Reshape", inputs=[rng.output[0], resh_cst.name], + name=utils.make_name('CPLX_' + node.name + 'range')) + resh_cst = ctx.make_const(name=utils.make_name('CPLX_reshape'), np_val=np.array([-1, 1], dtype=np.int64)) + rng_tr2 = ctx.make_node("Reshape", inputs=[rng.output[0], resh_cst.name], + name=utils.make_name('CPLX_' + node.name + 'range')) + rng_mat = ctx.make_node('MatMul', inputs=[rng_tr2.output[0], rng_tr1.output[0]], + name=utils.make_name('CPLX_' + node.name + 'range2')) + pi_cst = ctx.make_const(name=utils.make_name('CPLX_pi'), np_val=np.array([np.pi * 2], dtype=np_dtype)) + angle_pi = ctx.make_node("Mul", inputs=[rng_mat.output[0], pi_cst.name], + name=utils.make_name('CPLX_' + node.name + 'angle_pi')) + shape_cast = ctx.make_node('Cast', inputs=[dyn_shape.output[0]], attr={'to': res_onnx_dtype}) + angle_pibn = ctx.make_node("Div", inputs=[angle_pi.output[0], shape_cast.output[0]], + name=utils.make_name('CPLX_' + 
node.name + 'angle')) + if opset >= 13: + angle = ctx.make_node("Unsqueeze", inputs=[angle_pibn.output[0], just_0.name], + name=utils.make_name('CPLX_' + node.name + 'angles')) + else: + angle = ctx.make_node("Unsqueeze", inputs=[angle_pibn.output[0]], + name=utils.make_name('CPLX_' + node.name + 'angles'), + attr={'axes': [0]}) + rng_cos = ctx.make_node("Cos", inputs=[angle.output[0]], + name=utils.make_name('CPLX_' + node.name + 'cos')) + rng_sin = ctx.make_node("Sin", inputs=[angle.output[0]], + name=utils.make_name('CPLX_' + node.name + 'sin')) + onx_real_imag_part = ctx.make_node("Concat", inputs=[rng_cos.output[0], rng_sin.output[0]], + name=utils.make_name('CPLX_' + node.name + '_cst_fft'), + attr={'axis': 0}) + onx_real_imag_part_name = onx_real_imag_part.output[0] + + shapei = list(np.arange(len(shape))) + perm = shapei[:-2] + [shapei[-1], shapei[-2]] + trx = ctx.make_node( + "Transpose", inputs=[input_name], attr=dict(perm=perm), + name=utils.make_name(node.name + 'tr')) + + ctx.remove_node(node.name) + mult = ctx.make_node( + "MatMul", inputs=[onx_real_imag_part_name, trx.output[0]], + name=utils.make_name('CPLX_' + node.name + 'rfft')) + + new_shape = [2] + list(shape) + shapei = list(np.arange(len(new_shape))) + perm = shapei[:-2] + [shapei[-1], shapei[-2]] + last_node = ctx.make_node( + "Transpose", inputs=[mult.output[0]], attr=dict(perm=perm), + name=utils.make_name('CPLX_' + node.name + 'rfft'), + shapes=[new_shape], dtypes=[res_onnx_dtype]) + + ctx.replace_all_inputs(node.output[0], last_node.output[0]) # ops=ctx.get_nodes() + + +@tf_op("RFFT") +class RFFTOp(CommonFFTOp): + # support more dtype + + @classmethod + def version_1(cls, ctx, node, **kwargs): + return cls.any_version(True, 1, ctx, node, **kwargs) + + +@tf_op("FFT") +class FFTOp(CommonFFTOp): + # support more dtype + + @classmethod + def version_1(cls, ctx, node, **kwargs): + return cls.any_version(False, 1, ctx, node, **kwargs) + + @classmethod + def version_13(cls, ctx, node, 
**kwargs): + return cls.any_version(False, 13, ctx, node, **kwargs) + + +@tf_op("ComplexAbs") +class ComplexAbsOp: + # support more dtype + + @classmethod + def any_version(cls, opset, ctx, node, **kwargs): + """ + Computes the modules of a complex. + If the matrix dtype is not complex64 or complex128, + it assumes the first dimension means real part (0) + and imaginary part (1, :, :...). + """ + supported_dtypes = [ + onnx_pb.TensorProto.FLOAT, + onnx_pb.TensorProto.FLOAT16, + onnx_pb.TensorProto.DOUBLE, + onnx_pb.TensorProto.COMPLEX64, + onnx_pb.TensorProto.COMPLEX128, + ] + onnx_dtype = ctx.get_dtype(node.input[0]) + utils.make_sure(onnx_dtype in supported_dtypes, "Unsupported input type.") + shape = ctx.get_shape(node.input[0]) + np_dtype = utils.map_onnx_to_numpy_type(onnx_dtype) + utils.make_sure(shape[0] == 2, "ComplexAbs expected the first dimension to be 2 but shape is %r", shape) + + ind0 = ctx.make_const(name=utils.make_name('cst0'), np_val=np.array([0], dtype=np.int64)) + ind1 = ctx.make_const(name=utils.make_name('cst1'), np_val=np.array([1], dtype=np.int64)) + p2 = ctx.make_const(name=utils.make_name('p2'), np_val=np.array([2], dtype=np_dtype)) + + real_part = ctx.make_node( + 'Gather', inputs=[node.input[0], ind0.name], attr=dict(axis=0), + name=utils.make_name('Real_' + node.name)) + imag_part = ctx.make_node( + 'Gather', inputs=[node.input[0], ind1.name], attr=dict(axis=0), + name=utils.make_name('Imag_' + node.name)) + + real_part2 = ctx.make_node( + 'Pow', inputs=[real_part.output[0], p2.name], + name=utils.make_name(real_part.name + 'p2p')) + + imag_part2 = ctx.make_node( + 'Pow', inputs=[imag_part.output[0], p2.name], + name=utils.make_name(imag_part.name + 'p2p')) + + ctx.remove_node(node.name) + add = ctx.make_node( + "Add", inputs=[real_part2.output[0], imag_part2.output[0]], + name=utils.make_name('ComplexAbs_' + node.name)) + + squeezed = GraphBuilder(ctx).make_squeeze( + {'data': add.output[0], 'axes': [0]}, 
name=utils.make_name('ComplexAbs' + node.name), return_node=True) + + last_node = ctx.make_node( + "Sqrt", inputs=squeezed.output[:1], + name=utils.make_name('ComplexAbs' + node.name), + shapes=[shape[1:]], dtypes=[onnx_dtype]) + + ctx.replace_all_inputs(node.output[0], last_node.output[0]) # ops=ctx.get_nodes() + + @classmethod + def version_1(cls, ctx, node, **kwargs): + cls.any_version(1, ctx, node, **kwargs) + + @classmethod + def version_13(cls, ctx, node, **kwargs): + cls.any_version(13, ctx, node, **kwargs) diff --git a/lib/python3.10/site-packages/tf2onnx/onnx_opset/tensor.py b/lib/python3.10/site-packages/tf2onnx/onnx_opset/tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..af3798a842141d01bec9aa040f77ad1dbd35d180 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/onnx_opset/tensor.py @@ -0,0 +1,3624 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +tensor +""" + +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import logging +import sys + +import numpy as np +from onnx import onnx_pb, helper +from onnx.onnx_pb import TensorProto + +from tf2onnx import constants, utils +from tf2onnx.graph_builder import GraphBuilder +from tf2onnx.handler import tf_op +from tf2onnx.onnx_opset import nn, math +from tf2onnx.constants import NCHW_TO_NHWC, NHWC_TO_NCHW + +logger = logging.getLogger(__name__) + + +# pylint: disable=unused-argument,missing-docstring,unused-variable,pointless-string-statement,invalid-name + + +def _convert_shapenode_to_int64(ctx, node, input_number): + """cast int32 shape into int64 shape.""" + name = node.input[input_number] + + cast_node = ctx.insert_new_node_on_input(node, "Cast", name, to=onnx_pb.TensorProto.INT64) + ctx.set_dtype(cast_node.output[0], onnx_pb.TensorProto.INT64) + ctx.copy_shape(name, cast_node.output[0]) + + +def _wrap_concat_with_cast(ctx, node): + """wrap concat in casts for opset < 8 since it only supports.""" + 
supported_types = [onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.FLOAT16] + dtype = ctx.get_dtype(node.output[0]) + need_casting = dtype not in supported_types + if need_casting: + output_name = node.output[0] + # cast each inputs to float + for i, inp in enumerate(node.inputs): + input_cast = ctx.insert_new_node_on_input(node, "Cast", node.input[i], + to=onnx_pb.TensorProto.FLOAT) + ctx.set_dtype(input_cast.output[0], onnx_pb.TensorProto.FLOAT) + next_nodes = ctx.find_output_consumers(node.output[0]) + # cast output back to dtype unless the next op is a cast + if next_nodes[0].type != "Cast": + output_cast = ctx.insert_new_node_on_output("Cast", output_name, name=node.child_name(), + to=dtype) + ctx.set_dtype(output_cast.output[0], dtype) + ctx.copy_shape(output_name, output_cast.output[0]) + + +@tf_op("Size") +class Size: + @classmethod + def version_1(cls, ctx, node, **kwargs): + output_name = node.output[0] + dtype = ctx.get_dtype(output_name) + # TF size can output int32 or int64 but onnx only does int 64 + if dtype != onnx_pb.TensorProto.INT64: + ctx.set_dtype(output_name, onnx_pb.TensorProto.INT64) + output_cast = ctx.insert_new_node_on_output("Cast", output_name, name=node.child_name(), + to=dtype) + ctx.set_dtype(output_cast.output[0], dtype) + ctx.copy_shape(output_name, output_cast.output[0]) + + +@tf_op("Flatten") +class Flatten: + @classmethod + def version_1(cls, ctx, node, **kwargs): + pass + + @classmethod + def version_9(cls, ctx, node, **kwargs): + # no change for us + cls.version_1(ctx, node, **kwargs) + + @classmethod + def version_11(cls, ctx, node, **kwargs): + # no change + cls.version_1(ctx, node, **kwargs) + + +@tf_op("Dropout") +class Dropout: + @classmethod + def version_1(cls, ctx, node, **kwargs): + pass + + @classmethod + def version_6(cls, ctx, node, **kwargs): + pass + + @classmethod + def version_7(cls, ctx, node, **kwargs): + pass + + @classmethod + def version_10(cls, ctx, node, **kwargs): + pass + + @classmethod + def 
version_12(cls, ctx, node, **kwargs): + pass + + +@tf_op("Identity") +class Identity: + @classmethod + def version_1(cls, ctx, node, **kwargs): + if node.inputs[0].is_const(): + # should not remove the identity node if it is output of the graph + if node.output[0] in ctx.outputs: + return + # if identity has a const as input, remove it + input_name = node.input[0] + output_name = node.output[0] + ctx.replace_all_inputs(output_name, input_name) # ops=ctx.get_nodes() + ctx.remove_node(node.name) + + +@tf_op("IdentityN") +class IdentityN: + @classmethod + def version_1(cls, ctx, node, **kwargs): + ctx.remove_node(node.name) + for input_name, output_name in zip(node.input, node.output): + ctx.replace_all_inputs(output_name, input_name) # ops=ctx.get_nodes() + + +@tf_op("Reshape") +class Reshape: + @classmethod + def version_1(cls, ctx, node, **kwargs): + # T output = Reshape(T tensor, Tshape shape, @type Tshape) + # T reshaped = Reshape(T data, @INTS shape) - but takes a optional 2nd input for shape + shape_node = node.inputs[1] + shape = shape_node.get_tensor_value() + if shape is None: + logger.error("Reshape on node %s does not have a const shape", node.name) + return + ctx.remove_input(node, node.input[1], 1) + node.set_attr("shape", shape) + ctx.set_shape(node.output[0], shape) + + @classmethod + def version_5(cls, ctx, node, **kwargs): + dtype = ctx.get_dtype(node.output[0]) + need_casting = dtype in [onnx_pb.TensorProto.INT32, + onnx_pb.TensorProto.INT16, + onnx_pb.TensorProto.INT64] + # onnx wants reshape.input[1] to have the value be int64 which is not the case for tensorflow. 
+ _convert_shapenode_to_int64(ctx, node, 1) + if ctx.opset >= 8 or not need_casting: + # onnx reshape can handle the type - done + return + + # onnx < opset 8 does not know reshape for other types than float*, wrap the reshape in casts + input_cast = ctx.insert_new_node_on_input(node, "Cast", node.input[0], to=onnx_pb.TensorProto.FLOAT) + ctx.copy_shape(node.output[0], input_cast.output[0]) + + # if the next node is already a cast we don't need to insert another one + next_nodes = ctx.find_output_consumers(node.output[0]) + if len(next_nodes) != 1 or next_nodes[0].type != "Cast": + output_cast = ctx.insert_new_node_on_output("Cast", node.output[0], name=node.child_name(), + to=dtype) + ctx.set_dtype(output_cast.output[0], dtype) + ctx.copy_shape(node.output[0], output_cast.output[0]) + + +@tf_op("Squeeze") +class Squeeze: + @classmethod + def version_1(cls, ctx, node, **kwargs): + # T output = Squeeze(T input, @list(int) squeeze_dims) + # T squeezed = Squeeze(T data, @AttrType.INTS axes), axes are list of positive integers. 
+ axes = node.get_attr_value("squeeze_dims") + if axes is None: + axes = [] + else: + del node.attr["squeeze_dims"] + + # TF uses empty axes to indicate that all 1 dims should be squeezed + if len(axes) > 0: + neg_axis = any([val < 0 for val in axes]) + if neg_axis and ctx.opset < 11: + shape = ctx.get_shape(node.input[0]) + utils.make_sure(shape is not None, "squeeze with negative axes and unknown rank requires opset >= 11") + shape_len = len(shape) + axes = [a + shape_len if a < 0 else a for a in axes] + if ctx.opset < 13: + node.set_attr("axes", axes) + else: + axes_const = ctx.make_const(utils.make_name("axes_const"), np.array(axes, dtype=np.int64)) + ctx.replace_inputs(node, [node.input[0], axes_const.output[0]]) + + @classmethod + def version_11(cls, ctx, node, **kwargs): + # Opset 11 supports negative axis, but core logic is same + cls.version_1(ctx, node, **kwargs) + + @classmethod + def version_13(cls, ctx, node, **kwargs): + # Opset 13: parameters moved to inputs + cls.version_1(ctx, node, **kwargs) + + +@tf_op("Transpose") +class Transpose: + @classmethod + def version_1(cls, ctx, node, **kwargs): + # T y = Transpose(T x, Tperm perm, @type Tperm) + # T transposed = Transpose(T data, @INTS perm) + if len(node.input) > 1: + perm = node.inputs[1] + if perm.is_const(): + # perms is passed as const + dims = perm.get_tensor_value() + ctx.remove_input(node, node.input[1], 1) + node.set_attr("perm", dims) + else: + utils.make_sure(False, "perm can't be dynamic in ONNX") + else: + # graph rewrite moved perm to attribute + pass + + +@tf_op("Concat") +class Concat: + @classmethod + def version_1(cls, ctx, node, **kwargs): + # old concat op has axis as input[0] + node.type = "Concat" + axis_node = node.inputs[0] + axis_val = axis_node.get_tensor_value() + ctx.remove_input(node, node.input[0], 0) + + if axis_val < 0: # onnxruntime does not support -1 axis, but TF supports. 
+ input_shape = ctx.get_shape(node.input[0]) + axis_val = len(input_shape) + axis_val + node.set_attr("axis", axis_val) + + if ctx.opset < 8: + # opset < 8: might need to wrap concat in casts since only float is supported + _wrap_concat_with_cast(ctx, node) + return + + @classmethod + def version_11(cls, ctx, node, **kwargs): + # Opset 11 supports negative axis, but core logic is same + cls.version_1(ctx, node, **kwargs) + + +@tf_op("ConcatV2") +class ConcatV2: + @classmethod + def version_1(cls, ctx, node, **kwargs): + # T output = ConcatV2(T values, Tidx axis, @int N, @type Tidx) + # T concat_result = Concat(T inputs, @INT axis) + # if any input is empty, remove the input and concat the others + # NOTE: workaround for https://github.com/Microsoft/onnxruntime/issues/681 + node.type = "Concat" + removed_indices = [] + for i, inp in enumerate(node.inputs): + if inp.is_const() and inp.get_tensor_value(as_list=False).size == 0: + removed_indices.append(i) + for i in reversed(removed_indices): + ctx.remove_input(node, node.input[i], i) + # all inputs are deleted + if not node.input: + raise RuntimeError("all inputs of {} are empty".format(node.name)) + + axis_node = node.inputs[-1] + utils.make_sure(axis_node.is_const(), "{} needs to be const".format(axis_node.name)) + axis_val = axis_node.get_tensor_value() + ctx.remove_input(node, node.input[-1], len(node.input) - 1) + + if axis_val < 0: # onnxruntime does not support -1 axis, but TF supports. 
+ input_shape = ctx.get_shape(node.input[0]) + utils.make_sure(input_shape is not None, "shape of {} is None".format(node.input[0])) + axis_val = len(input_shape) + axis_val + node.set_attr("axis", axis_val) + + if ctx.opset < 8: + # opset < 8: might need to wrap concat in casts since only float is supported + _wrap_concat_with_cast(ctx, node) + return + + +@tf_op("Slice") +class Slice: + @classmethod + def version_1(cls, ctx, node, **kwargs): + # T output = Slice(T input, Index begin, Index size) + # T output = Slice(T input, Tind starts, Tind ends, Tind axes, Tind steps) + # "ends" are exclusive, "axes" and "steps" are optional, their default val are [0, ...] and 1 + input_tensor = node.input[0] + starts = node.input[1] + size = node.input[2] + # in tf, size can be -1 which means all elem are taken, so size can't be added starts directly. + # the way to make sure size are not less than 0: set "sizes"'s elem to be int_max if elem val is -1 + size_dtype = ctx.get_dtype(size) + size_np_dtype = utils.map_onnx_to_numpy_type(size_dtype) + if ctx.get_node_by_output(size).is_const() and ctx.get_node_by_output(starts).is_const(): + starts = ctx.get_node_by_output(starts).get_tensor_value() + sizes = ctx.get_node_by_output(size).get_tensor_value() + ends = [] + for start, size in zip(starts, sizes): + # get all elements + if size == -1: + dtype = ctx.get_dtype(node.input[1]) + utils.make_sure(dtype, "dtype of {} is None".format(node.input[1])) + utils.make_sure(dtype, "dtype of {} is None".format(node.input[1])) + ends.append(np.iinfo(dtype).max) + else: + ends.append(start + size) + + else: + neg_one_val = np.array([-1]).astype(size_np_dtype) + neg_one = ctx.make_const(utils.make_name("const"), neg_one_val).output[0] + + int_max_val = np.array([utils.get_max_value(size_np_dtype)]).astype(size_np_dtype) + int_max = ctx.make_const(utils.make_name("largest_int_val"), int_max_val).output[0] + + size_are_neg_one_flag = ctx.make_node("Equal", [neg_one, size]).output[0] + 
size_are_neg_one_flag = ctx.make_node("Cast", [size_are_neg_one_flag], attr={"to": size_dtype}).output[0] + value_to_add = ctx.make_node("Mul", [int_max, size_are_neg_one_flag]).output[0] + size_processed = ctx.make_node("Add", [size, value_to_add]).output[0] + ends = ctx.make_node("Add", [starts, size_processed]).output[0] + + ctx.remove_node(node.name) + inputs_map = {"data": input_tensor, "starts": starts, "ends": ends} + kwargs = {**inputs_map, "outputs": node.output} + _ = GraphBuilder(ctx).make_slice(kwargs, name=node.name) + + @classmethod + def version_10(cls, ctx, node, **kwargs): + cls.version_1(ctx, node, **kwargs) + + @classmethod + def version_11(cls, ctx, node, **kwargs): + cls.version_1(ctx, node, **kwargs) + + +@tf_op("Roll") +class Roll: + @classmethod + def any_version(cls, opset, ctx, node, **kwargs): + utils.make_sure(node.inputs[2].is_const(), "Can only convert Roll is axis is const") + axes = node.inputs[2].get_tensor_value() + if axes == -1: + axes = len(ctx.get_shape(node.input[0])) + axes + if not isinstance(axes, list): + axes = [axes] + shifts_dtype = ctx.get_dtype(node.input[1]) + if shifts_dtype != TensorProto.INT64: + shifts_casted = ctx.insert_new_node_on_input(node, "Cast", node.input[1], to=TensorProto.INT64).output[0] + else: + shifts_casted = node.input[1] + + if len(axes) == 1: + unsqueeze_node = GraphBuilder(ctx).make_unsqueeze( + {'data': shifts_casted, "axes": [0]}, op_name_scope=node.name, return_node=True) + shifts_split = [unsqueeze_node.output[0]] + else: + shifts_split = ctx.make_node("Split", [shifts_casted], attr={'axis': 0}, + output_count=len(axes), op_name_scope=node.name).output + + zero_const = ctx.make_const(utils.make_name("zeros_const"), np.array([0], np.int64)).output[0] + shape_node = ctx.make_node("Shape", [node.input[0]], op_name_scope=node.name) + + data = node.input[0] + + for axis, shift in zip(axes, shifts_split): + len_along_axis = GraphBuilder(ctx).make_slice( + {"data": shape_node.output[0], "ends": 
[axis + 1], "starts": [axis]}) + remaining_len = ctx.make_node("Sub", [len_along_axis, shift], op_name_scope=node.name).output[0] + axes_const = ctx.make_const(utils.make_name("axes_const"), np.array([axis], np.int64)).output[0] + slice_one = ctx.make_node("Slice", [data, zero_const, remaining_len, axes_const], op_name_scope=node.name) + slice_two = ctx.make_node("Slice", [data, remaining_len, len_along_axis, axes_const], + op_name_scope=node.name) + concat_node = ctx.make_node("Concat", [slice_two.output[0], slice_one.output[0]], + attr={'axis': axis}, op_name_scope=node.name) + data = concat_node.output[0] + + ctx.replace_all_inputs(node.output[0], data) + ctx.remove_node(node.name) + + @classmethod + def version_10(cls, ctx, node, **kwargs): + cls.any_version(10, ctx, node, **kwargs) + + @classmethod + def version_11(cls, ctx, node, **kwargs): + cls.any_version(11, ctx, node, **kwargs) + + @classmethod + def version_13(cls, ctx, node, **kwargs): + # Parameters moved to inputs for operator Squeeze, Unsqueeze. 
+ cls.any_version(13, ctx, node, **kwargs) + + +@tf_op("Gather") +class Gather: + @classmethod + def version_1(cls, ctx, node, **kwargs): + node.type = "Gather" + + @classmethod + def version_11(cls, ctx, node, **kwargs): + # no change + cls.version_1(ctx, node, **kwargs) + + +@tf_op("GatherV2") +class GatherV2: + @classmethod + def version_1(cls, ctx, node, **kwargs): + # for GatherV2 axis come as input + err_msg = "Opset 12 required for batch_dims attribute of GatherV2" + utils.make_sure(node.get_attr_value("batch_dims", 0) == 0, err_msg) + node.type = "Gather" + utils.make_sure(node.inputs[2].is_const(), "Axis of GatherV2 node must be constant") + axis = node.inputs[2].get_tensor_value() + ctx.remove_input(node, node.input[2], 2) + node.set_attr("axis", axis) + + @classmethod + def version_11(cls, ctx, node, **kwargs): + # no change + cls.version_1(ctx, node, **kwargs) + + @classmethod + def version_12(cls, ctx, node, **kwargs): + batch_dims = node.get_attr_value("batch_dims", 0) + if batch_dims == 0: + cls.version_1(ctx, node, **kwargs) + return + # If batch_dims is not zero, use GatherND to simulate Gather with batch dims. + data_inp, indices_inp, axis_inp = node.input + utils.make_sure(node.inputs[2].is_const(), "Axis of GatherV2 node must be constant") + axis = node.inputs[2].get_tensor_value() + ctx.remove_input(node, axis_inp, 2) + if ctx.get_dtype(indices_inp) != TensorProto.INT64: + indices_inp = ctx.make_node("Cast", [indices_inp], attr={'to': TensorProto.INT64}).output[0] + unperm = None + # GatherND doesn't take an axis so we have to transpose stuff around + if axis != batch_dims: + data_rank = ctx.get_rank(data_inp) + indices_rank = ctx.get_rank(indices_inp) + result_rank = data_rank + indices_rank - 1 - batch_dims + shift_amt = axis - batch_dims + err_msg = "Cannot convert GatherV2 with batch dims since inputs have unknown ranks." 
+ utils.make_sure(data_rank is not None and indices_rank is not None, err_msg) + perm = list(range(data_rank)) + perm = perm[:batch_dims] + perm[axis:axis+1] + perm[batch_dims:axis] + perm[axis+1:] + data_inp = ctx.make_node("Transpose", [data_inp], attr={'perm': perm}).output[0] + ctx.replace_input(node, node.input[0], data_inp, 0) + unperm = list(range(result_rank)) + j = indices_rank+shift_amt + unperm = unperm[:batch_dims] + unperm[indices_rank:j] + unperm[batch_dims:indices_rank] + unperm[j:] + node.type = "GatherND" + unsqueeze_node = GraphBuilder(ctx).make_unsqueeze({'data': indices_inp, 'axes': [-1]}) + ctx.replace_input(node, node.input[1], unsqueeze_node, 1) + if unperm is not None: + ctx.update_node_shape_dtype(node, override=True) + ctx.insert_new_node_on_output("Transpose", node.output[0], perm=unperm) + + +def _make_gathernd_inner_loop(ctx, params, index, dtype): + """create the inner loop for GatherNd.""" + # gather_cur = params + # for (int i = 0; i < size(index); i++) + # gather_res = gather(gather_cur, index[i]) + scope_name = utils.make_name("gathernd_inner_loop") + trip_node = ctx.make_node("Size", [index.output[0]]) + cond_const = ctx.make_const(utils.make_name("cond"), np.ones((), dtype=np.bool)) + trip_name = utils.make_name("i") + cond_name = utils.make_name("cond") + cond_out_name = utils.make_name("cond_out") + cur_name = utils.make_name("gather_cur") + result_name = utils.make_name("res") + + # body graph creation + g = ctx.create_new_graph_with_same_config() + g.add_graph_input(trip_name, TensorProto.INT64, [1]) + g.add_graph_input(cond_name, TensorProto.BOOL, []) + g.add_graph_input(cur_name, dtype, []) + g.parent_graph = ctx + + index_i = g.make_node("Gather", [index.output[0], trip_name], attr={"axis": 0}) + gather = g.make_node("Gather", [cur_name, index_i.output[0]], attr={"axis": 0}) + GraphBuilder(g).make_squeeze( + {'data': gather.output[0], "axes": [0], 'outputs': [result_name]}) + g.make_node("Identity", [cond_name], 
outputs=[cond_out_name]) + + g.add_graph_output(cond_out_name, TensorProto.BOOL, []) + g.add_graph_output(result_name, dtype, []) + + branches = {"body": g} + inner_loop = ctx.make_node("Loop", + [trip_node.output[0], cond_const.output[0], params], + op_name_scope=scope_name, skip_conversion=False, branches=branches) + return inner_loop + + +def make_gathernd(ctx, params, indices, output, scope_name, t_params, shapes, dtypes): + """make GatherNd op.""" + # Tparams output = GatherNd(Tparams params, Tidx indices) + scope_name = utils.make_name(scope_name) + # reshape indices into [sum(indices[:-1]), indices[-1]] + indices_shape = ctx.make_node("Shape", [indices], dtypes=[TensorProto.INT64]) + indices_size = ctx.make_node("Size", [indices]) + attr = {"axes": [0], "ends": [sys.maxsize], "starts": [-1]} + inputs_map = {"data": indices_shape.output[0], **attr} + inner_shape = GraphBuilder(ctx).make_slice(inputs_map, dtypes=[TensorProto.INT64]) + outter_shape = ctx.make_node("Div", + [indices_size.output[0], inner_shape], + dtypes=[TensorProto.INT64]) + flatten_shape = ctx.make_node("Concat", + [outter_shape.output[0], inner_shape], + attr={"axis": 0}, + dtypes=[TensorProto.INT64]) + flatten_indices = ctx.make_node("Reshape", [indices, flatten_shape.output[0]]) + + # outter loop for each index + # for (int i=0; i= 0]) + for i, v in enumerate(split): + if v == -1: + split[i] = final_sum - sums + ctx.remove_input(node, node.input[2], 2) + ctx.remove_input(node, node.input[1], 1) + node.set_attr("split", split) + node.set_attr("axis", split_dims) + + @classmethod + def version_2(cls, ctx, node, **kwargs): + cls.version_1(ctx, node, **kwargs) + + @classmethod + def version_13(cls, ctx, node, **kwargs): + # Split now supports dynamic split lengths + if node.inputs[1].is_const(): + # Call version 1 to deal with -1 cases + cls.version_1(ctx, node, **kwargs) + # Convert attr to input + split_val = node.get_attr_value("split") + split_const = 
ctx.make_const(utils.make_name("split"), np.array(split_val, np.int64)) + ctx.replace_inputs(node, [node.input[0], split_const.output[0]]) + del node.attr["split"] + else: + # Technically incorrect if any of the splits are -1 + node.type = "Split" + split_dims = node.inputs[2].get_tensor_value() + ctx.remove_input(node, node.input[2], 2) + node.set_attr("axis", split_dims) + if ctx.get_dtype(node.input[1]) != TensorProto.INT64: + ctx.insert_new_node_on_input(node, "Cast", node.input[1], to=TensorProto.INT64) + + +@tf_op("ExpandDims") +class ExpandDims: + @classmethod + def version_1(cls, ctx, node, **kwargs): + shape = ctx.get_shape(node.output[0]) + dim_node = node.inputs[1] + + utils.make_sure(dim_node.is_const(), "ExpandDims with non-const axes requires opset 13") + node.type = "Unsqueeze" + # tf.expanddims() wants a scalar per doc but quietly accepts any single-element tensor + axis = dim_node.get_tensor_value(as_list=False).flatten()[0] + + if axis < 0 and ctx.opset < 11: + utils.make_sure(shape is not None, "ExpandDims with negative axes and unknown rank requires opset >= 11") + out_rank = len(shape) + axis += out_rank + node.set_attr("axes", [axis]) + ctx.remove_input(node, node.input[1], 1) + + @classmethod + def version_7(cls, ctx, node, **kwargs): + cls.version_1(ctx, node, **kwargs) + + @classmethod + def version_11(cls, ctx, node, **kwargs): + cls.version_1(ctx, node, **kwargs) + + @classmethod + def version_13(cls, ctx, node, **kwargs): + # Parameters moved to inputs for operator Squeeze, Unsqueeze. 
+ if ctx.get_dtype(node.input[1]) != onnx_pb.TensorProto.INT64: + ctx.insert_new_node_on_input(node, "Cast", node.input[1], to=onnx_pb.TensorProto.INT64) + if ctx.get_shape(node.input[1]) != [1]: + const_newshape = ctx.make_const(utils.make_name("reshape_const"), np.array([1], dtype=np.int64)) + reshape_node = ctx.make_node("Reshape", [node.input[1], const_newshape.output[0]]) + ctx.replace_inputs(node, [node.input[0], reshape_node.output[0]]) + node.type = "Unsqueeze" + + +@tf_op("StridedSlice") +class StridedSlice: + @classmethod + def version_1(cls, ctx, node, **kwargs): + # for now we implement common cases. Things like strides!=1 are not mappable to onnx. + not_supported_attr = ["new_axis_mask"] + for attr_name in not_supported_attr: + attr = node.get_attr(attr_name) + if attr is not None and attr.i != 0: + raise ValueError("StridedSlice: attribute " + attr_name + " not supported") + + onnx_dtype = ctx.get_dtype(node.input[1]) + np_dtype = utils.ONNX_TO_NUMPY_DTYPE[onnx_dtype] + max_size = np.iinfo(np_dtype).max + begin = node.inputs[1].get_tensor_value() + end = node.inputs[2].get_tensor_value() + strides = node.inputs[3].get_tensor_value() + end_mask = node.get_attr("end_mask") + end_mask = end_mask.i if end_mask is not None else 0 + begin_mask = node.get_attr("begin_mask") + begin_mask = begin_mask.i if begin_mask is not None else 0 + shrink_axis_mask = node.get_attr("shrink_axis_mask") + shrink_axis_mask = shrink_axis_mask.i if shrink_axis_mask is not None else 0 + ellipsis_mask = node.get_attr("ellipsis_mask") + ellipsis_mask = ellipsis_mask.i if ellipsis_mask is not None else 0 + new_begin = [] + new_end = [] + axes = [] + # onnx slice op can't remove a axis, track axis and add a squeeze op if needed + needs_squeeze = [] + # ellipsis: one bit at most can be 1. An ellipsis implicitly creates as many range specifications as + # necessary to fully specify the sliced range for every dimension. 
+ # For example for a 4-dimensional tensor foo the slice foo[2, ..., 5:8] implies foo[2, :, :, 5:8] + # NOTE: we ignore those axes denoted by ellipsis using `axes` attribute + ellipsis_gap = 0 + for idx, begin_item in enumerate(begin): + if strides[idx] != 1: + raise ValueError("StridedSlice: only strides=1 is supported") + if (ellipsis_mask >> idx) & 1: + input_shape = ctx.get_shape(node.input[0]) + utils.make_sure( + input_shape is not None, + "StridedSlice op {} requires the shape of input".format(node.name) + ) + ellipsis_gap = len(input_shape) - len(begin) + continue + + # ignore ellipsis axes + axes.append(idx + ellipsis_gap) + end_item = end[idx] + + # an implicit condition is stride == 1 (checked in above) + if begin_item < 0 and end_item == 0: + end_item = max_size + + mask = (shrink_axis_mask >> idx) & 1 + if mask != 0: + new_begin.append(begin_item) + end_item = begin_item + 1 if begin_item != -1 else max_size + new_end.append(end_item) + needs_squeeze.append(idx + ellipsis_gap) + continue + + mask = (begin_mask >> idx) & 1 + if mask != 0: + new_begin.append(0) + else: + new_begin.append(begin_item) + + mask = (end_mask >> idx) & 1 + if mask != 0: + new_end.append(max_size) + else: + new_end.append(end_item) + + out_dtypes = [ctx.get_dtype(node.output[0])] + out_shapes = [ctx.get_shape(node.output[0])] + ctx.remove_node(node.name) + + attr = {"starts": new_begin, "ends": new_end, "axes": axes} + inputs_map = {"data": node.input[0], **attr} + kwargs = {**inputs_map, "outputs": node.output} + node = GraphBuilder(ctx).make_slice( + kwargs, name=node.name, dtypes=out_dtypes, shapes=out_shapes, return_node=True) + nodes = [node] + if needs_squeeze: + # insert_new_node_on_output(self, op_type, output_name=None, name=None, inputs=None, domain=None, **kwargs) + # ctx.insert_new_node_on_output("Squeeze", node.output[0], name) + name = utils.make_name(node.name) + squeeze_node = GraphBuilder(ctx).make_squeeze( + {"axes": needs_squeeze, 'data': node.output[0]}, 
name=name, return_node=True) + ctx.insert_node_on_output(squeeze_node) + + nodes.append(squeeze_node) + input_dtype = ctx.get_dtype(node.output[0]) + ctx.set_dtype(squeeze_node.output[0], input_dtype) + ctx.copy_shape(node.output[0], squeeze_node.output[0]) + + # onnx slice as of opset 7 does only take float tensors ... cast if needed + input_dtype = ctx.get_dtype(node.input[0]) + if ctx.opset < 9: + if input_dtype != onnx_pb.TensorProto.FLOAT: + if node.inputs[0].type == "Cast" and len(ctx.find_output_consumers(node.inputs[0].output[0])) == 1: + # override the previous cast + cast_node = node.inputs[0] + cast_node.set_attr("to", onnx_pb.TensorProto.FLOAT) + else: + cast_node = ctx.insert_new_node_on_input(node, "Cast", node.input[0], + to=onnx_pb.TensorProto.FLOAT) + nodes.insert(0, cast_node) + ctx.set_dtype(cast_node.output[0], onnx_pb.TensorProto.FLOAT) + ctx.copy_shape(node.input[0], cast_node.output[0]) + # undo the cast afer slice + name = utils.make_name(node.name) + cast_node = ctx.insert_new_node_on_output("Cast", nodes[-1].output[0], name, + to=input_dtype) + ctx.set_dtype(cast_node.output[0], input_dtype) + ctx.copy_shape(node.output[0], cast_node.output[0]) + nodes.append(cast_node) + + @classmethod + def any_version_after10(cls, opset, ctx, node, **kwargs): + # T output = Slice(T input, Index begin, Index end, Index strides + # @int begin_mask, @int end_mask, @int ellipsis_mask + # @int shrink_axis_mask, @int new_axis_mask) + # T output = Slice(T input, Tind starts, Tind ends, Tind axes, Tind steps) + # "ends" are exclusive, "axes" and "steps" are optional, their default val are [0, ...] 
and 1 + input_x = node.inputs[0] + begin = node.inputs[1] + end = node.inputs[2] + strides = node.inputs[3] + new_axis_mask = node.get_attr("new_axis_mask") + new_axis_mask = new_axis_mask.i if new_axis_mask is not None else 0 + + if begin.is_const() and end.is_const() and strides.is_const() \ + and all(val == 1 for val in strides.get_tensor_value()) \ + and new_axis_mask == 0: + cls.version_1(ctx, node, **kwargs) + return + + onnx_dtype = ctx.get_dtype(node.input[1]) + np_dtype = utils.ONNX_TO_NUMPY_DTYPE[onnx_dtype] + + # NOTE: Max op only supports float32, deal with overflow when cast back to int32 + # enable it after Max supports int32 and int64 + # max_size = utils.get_max_value(np_dtype) + # min_size = utils.get_min_value(np_dtype) + max_size = 1e9 + min_size = -1e9 + + end_mask = node.get_attr("end_mask") + end_mask = end_mask.i if end_mask is not None else 0 + begin_mask = node.get_attr("begin_mask") + begin_mask = begin_mask.i if begin_mask is not None else 0 + ellipsis_mask = node.get_attr("ellipsis_mask") + ellipsis_mask = ellipsis_mask.i if ellipsis_mask is not None else 0 + shrink_axis_mask = node.get_attr("shrink_axis_mask") + shrink_axis_mask = shrink_axis_mask.i if shrink_axis_mask is not None else 0 + + param_shape = ctx.get_shape(node.input[1]) or \ + ctx.get_shape(node.input[2]) or \ + ctx.get_shape(node.input[3]) + utils.make_sure( + param_shape is not None, + "StridedSlice op {} requires the shape of begin/end/strides".format(node.name) + ) + param_rank = param_shape[0] + + if new_axis_mask != 0: + unqueeze_at = [] + ellipsis_gap = 0 + num_new = 0 + for bit in range(32): + if (new_axis_mask >> bit) & 1 == 1: + num_new += 1 + if (ellipsis_mask >> bit) & 1: + input_shape = ctx.get_shape(input_x.output[0]) + # calculate what rank for ellipsis: input rank - (being rank - all new_axis - 1) + ellipsis_gap = len(input_shape) - param_rank + num_new + 1 + if (new_axis_mask >> bit) & 1 == 1: + unqueeze_at.append(bit + ellipsis_gap) + begin_mask |= 1 << 
bit + end_mask |= 1 << bit + + input_x = GraphBuilder(ctx).make_unsqueeze( + {'data': input_x.output[0], 'axes': unqueeze_at}, return_node=True) + + + # use in onnx graph to mask begin + new_begin_mask = [1] * param_rank + # use in onnx graph to mask end + new_end_mask = [min_size] * param_rank + # for shrink mask, if shrink mask is 1, set stride to be max_size + shrink_strided_mask = [min_size] * param_rank + axes = [] + # onnx slice op can't remove a axis, track axis and add a squeeze op if needed + needs_squeeze = [] + ellipsis_gap = 0 + for idx in range(param_rank): + if (ellipsis_mask >> idx) & 1: + input_shape = ctx.get_shape(input_x.output[0]) + utils.make_sure( + input_shape is not None, + "StridedSlice op {} requires the shape of input".format(node.name) + ) + ellipsis_gap = len(input_shape) - param_rank + # handle the redundant param + new_begin_mask[idx] = 0 + new_end_mask[idx] = max_size + axes.append(idx) + continue + + # ignore ellipsis axes + axes.append(idx + ellipsis_gap) + + mask = (shrink_axis_mask >> idx) & 1 + if mask != 0: + shrink_strided_mask[idx] = max_size + new_end_mask[idx] = max_size + needs_squeeze.append(idx + ellipsis_gap) + continue + + mask = (begin_mask >> idx) & 1 + if mask != 0: + new_begin_mask[idx] = 0 + + mask = (end_mask >> idx) & 1 + if mask != 0: + new_end_mask[idx] = max_size + + out_dtypes = [ctx.get_dtype(node.output[0])] + out_shapes = [ctx.get_shape(node.output[0])] + ctx.remove_node(node.name) + + # mask begin + new_begin_mask = np.array(new_begin_mask, dtype=np_dtype) + if not np.all(new_begin_mask == 1): + if begin.is_const() and strides.is_const(): + new_begin_vals = np.copy(begin.get_tensor_value(as_list=False)) + strides_vals = strides.get_tensor_value(as_list=False) + idx1 = np.where(new_begin_mask == 0) + idx2 = np.where(strides_vals < 0) + idx3 = np.intersect1d(idx1, idx2) + new_begin_vals[idx3] = max_size + begin = ctx.make_const(utils.make_name("begin_masked"), new_begin_vals) + else: + begin_mask_const = 
ctx.make_const(utils.make_name("begin_mask"), np.equal(new_begin_mask, 0)) + zero_const = ctx.make_const(utils.make_name("zero_const"), np.zeros(1, dtype=np_dtype)) + max_const = ctx.make_const(utils.make_name("max_const"), np.array(max_size, dtype=np_dtype)) + op1 = ctx.make_node("Less", [strides.output[0], zero_const.output[0]], op_name_scope=node.name) + op2 = ctx.make_node("And", [op1.output[0], begin_mask_const.output[0]], op_name_scope=node.name) + begin = ctx.make_node("Where", [op2.output[0], max_const.output[0], begin.output[0]], + op_name_scope=node.name) + + # mask end + new_end_mask = np.array(new_end_mask, dtype=np_dtype) + end_output = end.output[0] + if not np.all(new_end_mask == min_size): + if end.is_const() and strides.is_const(): + new_end_mask = np.maximum(end.get_tensor_value(as_list=False), new_end_mask) + idx = np.where(new_end_mask == max_size) + sign = np.sign(strides.get_tensor_value(as_list=False))[idx] + new_end_mask[idx] = new_end_mask[idx] * sign + end = ctx.make_const(utils.make_name("end_masked"), new_end_mask) + end_output = end.output[0] + else: + # Overlay new_end_mask with specified end values. 
+ # Adjust max_size to min_size if steps are < 0 + max_const = ctx.make_const(utils.make_name("max_const"), np.array(max_size, dtype=np_dtype)) + min_const = ctx.make_const(utils.make_name("min_const"), np.array(min_size, dtype=np_dtype)) + zero_const = ctx.make_const(utils.make_name("zero_const"), np.zeros(1, dtype=np_dtype)) + end_mask_const = ctx.make_const(utils.make_name("end_mask"), np.array(new_end_mask, dtype=np_dtype)) + outputname = utils.make_name("{}__newendmask".format(node.name)) + new_end_mask = math.make_min_or_max_op(ctx, "Max", [end.output[0], end_mask_const.output[0]], + [outputname]) + op1 = ctx.make_node("Less", [strides.output[0], zero_const.output[0]], op_name_scope=node.name) + op2 = ctx.make_node("Equal", [new_end_mask.output[0], max_const.output[0]], op_name_scope=node.name) + op3 = ctx.make_node("And", [op2.output[0], op1.output[0]], op_name_scope=node.name) + final_end = ctx.make_node("Where", [op3.output[0], min_const.output[0], + new_end_mask.output[0]], op_name_scope=node.name) + end_output = final_end.output[0] + + # mask strides for shrink + shrink_strided_mask = np.array(shrink_strided_mask, dtype=np_dtype) + strides_output = strides.output[0] + if not np.all(shrink_strided_mask == min_size): + if strides.is_const(): + strides = ctx.make_const( + utils.make_name("strides_masked"), + np.maximum(strides.get_tensor_value(as_list=False), shrink_strided_mask) + ) + strides_output = strides.output[0] + else: + shrink_strided_mask_const = ctx.make_const( + utils.make_name("strides_mask"), + np.array(shrink_strided_mask, dtype=np_dtype) + ) + strides_output = utils.make_name("{}__strides".format(node.name)) + math.make_min_or_max_op( + ctx, "Max", + [strides.output[0], shrink_strided_mask_const.output[0]], + [strides_output] + ) + # create axes input + axes_const = ctx.make_const( + utils.make_name("slice_axes"), + np.array(axes, dtype=np_dtype) + ) + axes_output = axes_const.output[0] + + inputs_map = { + "data": input_x.output[0], + 
"starts": begin.output[0], + "ends": end_output, + "steps": strides_output, + "axes": axes_output + } + kwargs = {**inputs_map, "outputs": node.output} + node = GraphBuilder(ctx).make_slice(kwargs, name=node.name, dtypes=out_dtypes, shapes=out_shapes) + node = ctx.get_node_by_output(node) + if needs_squeeze: + squeeze_node = GraphBuilder(ctx).make_squeeze( + {"axes": needs_squeeze, "data": node.output[0]}, name=node.child_name(), return_node=True) + ctx.insert_node_on_output(squeeze_node, node.output[0]) + input_dtype = ctx.get_dtype(node.output[0]) + ctx.set_dtype(squeeze_node.output[0], input_dtype) + ctx.copy_shape(node.output[0], squeeze_node.output[0]) + + @classmethod + def version_10(cls, ctx, node, **kwargs): + cls.any_version_after10(10, ctx, node, **kwargs) + + @classmethod + def version_13(cls, ctx, node, **kwargs): + # Parameters moved to inputs for operator Squeeze, Unsqueeze. + cls.any_version_after10(13, ctx, node, **kwargs) + + +@tf_op("Cast") +class Cast: + @classmethod + def version_1(cls, ctx, node, **kwargs): + # DstT y = Cast(SrcT x, @type SrcT, @type DstT) + # T2 output = Cast(T1 input, @STRING to) + dst = node.get_attr("to") + dst = utils.ONNX_DTYPE_NAMES[dst] + node.set_attr("to", dst) + + @classmethod + def version_6(cls, ctx, node, **kwargs): + pass + + @classmethod + def version_9(cls, ctx, node, **kwargs): + pass + + +@tf_op("TopKV2", onnx_op="TopK") +class TopKV2: + @classmethod + def version_1(cls, ctx, node, **kwargs): + # T values, int32 indices = TopKV2(T input, int32 k, @bool sorted=true, @realnumbertype T) + # T values, I indices = TopK(T x, @int axis=-1, @int k). 
I: int64 + topk_node_name = node.name + topk_output1 = node.output[0] + topk_output2 = node.output[1] + + shapes = node.output_shapes + dtypes = node.output_dtypes + k = node.inputs[1].get_tensor_value() + ctx.remove_node(topk_node_name) + new_topk_name = utils.make_name(topk_node_name) + new_topk_node = ctx.make_node("TopK", [node.input[0]], + outputs=[topk_output1, utils.port_name(new_topk_name, 1)], + name=new_topk_name, attr={"k": k}, + shapes=shapes, dtypes=[dtypes[0], onnx_pb.TensorProto.INT64]) + + if dtypes[0] != onnx_pb.TensorProto.FLOAT: + # opset-1 only supports float dtypes + ctx.insert_new_node_on_output("Cast", new_topk_node.input[0], to=onnx_pb.TensorProto.FLOAT) + ctx.insert_new_node_on_output("Cast", new_topk_node.output[0], to=dtypes[0]) + new_cast_name = utils.make_name(topk_node_name) + ctx.make_node("Cast", [new_topk_node.output[1]], outputs=[topk_output2], + name=new_cast_name, attr={"to": onnx_pb.TensorProto.INT32}, + shapes=[shapes[1]], dtypes=[onnx_pb.TensorProto.INT32]) + + @classmethod + def any_version_after10(cls, opset, ctx, node, **kwargs): + # onnx only supports input K as a 1D tesor with dtype int64 + # while in tf, K is a 0D tensor with dtype int32 + dtypes = node.output_dtypes + k_0d = node.input[1] + cast = ctx.make_node("Cast", [k_0d], attr={"to": onnx_pb.TensorProto.INT64}) + k_1d = GraphBuilder(ctx).make_unsqueeze({'data': cast.output[0], "axes": [0]}, return_node=True) + ctx.replace_input(node, k_0d, k_1d.output[0], 1) + # cast X if needed + if dtypes[0] != onnx_pb.TensorProto.FLOAT: + # opset-10 supports types other than float but onnxruntime does not + ctx.insert_new_node_on_output("Cast", node.input[0], to=onnx_pb.TensorProto.FLOAT) + ctx.insert_new_node_on_output("Cast", node.output[0], to=dtypes[0]) + # cast the index output to int32 + cast_out = ctx.insert_new_node_on_output("Cast", node.output[1], name=utils.make_name(node.name), to=dtypes[1]) + ctx.set_dtype(cast_out.output[0], dtypes[1]) + 
ctx.copy_shape(node.output[1], cast_out.output[0]) + + @classmethod + def version_10(cls, ctx, node, **kwargs): + cls.any_version_after10(10, ctx, node, **kwargs) + + @classmethod + def version_11(cls, ctx, node, **kwargs): + # opset 11 supports negative axis, and new attrs 'largest' and 'sorted' + # the core logic doesn't change, using defaults for new attrs + cls.any_version_after10(11, ctx, node, **kwargs) + + @classmethod + def version_13(cls, ctx, node, **kwargs): + # Parameters moved to inputs for operator Squeeze, Unsqueeze. + cls.any_version_after10(13, ctx, node, **kwargs) + + +@tf_op("Tile") +class Tile: + @classmethod + def version_1(cls, ctx, node, **kwargs): + # onnx wants shape input to be int64 + _convert_shapenode_to_int64(ctx, node, 1) + + +@tf_op("Pack") +class Pack: + @classmethod + def any_version(cls, opset, ctx, node, **kwargs): + # hack to make up for the missing onnx pack op + axis = node.get_attr("axis").i + if axis < 0: + axis += len(ctx.get_shape(node.input[0])) + 1 + + inputs = [] + dtype = None + gb = GraphBuilder(ctx) + # insert Unsqueeze on each input + for i, n in enumerate(node.inputs): + dtype = ctx.get_dtype(node.input[i]) + shape = ctx.get_shape(node.input[i]) + if shape is not None: + shape = shape.copy() + shape.insert(axis, 1) + new_node = gb.make_unsqueeze( + {'data': node.input[i], 'axes': [axis]}, + op_name_scope=node.name, shapes=[shape], dtypes=[dtype], return_node=True) + output_name = new_node.output[0] + ctx.replace_input(node, node.input[i], output_name, i) + inputs.append(output_name) + + shapes = node.output_shapes + dtypes = node.output_dtypes + ctx.remove_node(node.name) + # concat all unqueezes + concat = ctx.make_node("Concat", inputs, op_name_scope=node.name, attr={"axis": axis}, + shapes=shapes, dtypes=dtypes) + ctx.replace_all_inputs(node.output[0], concat.output[0]) # ops=ctx.get_nodes() + + @classmethod + def version_1(cls, ctx, node, **kwargs): + cls.any_version(1, ctx, node, **kwargs) + + @classmethod + 
def version_13(cls, ctx, node, **kwargs): + # Parameters moved to inputs for operator Squeeze, Unsqueeze. + cls.any_version(13, ctx, node, **kwargs) + + +@tf_op("Unpack") +class Unpack: + @classmethod + def version_1(cls, ctx, node, **kwargs): + # hack to make up for the missing onnx unpack op + # squeeze does not support negative axis + axis = node.get_attr("axis").i + if axis < 0: + shape = ctx.get_shape(node.input[0]) + utils.make_sure(shape is not None, "shape of unpack input is None: {}".format(node.input[0])) + axis += len(shape) + # split the tensor into n outputs + node.type = "Split" + + # for each output we need to squeeze axis + for n in node.output: + op_name = utils.make_name(node.name) + shape = ctx.get_shape(n) + dtype = ctx.get_dtype(n) + squeeze_node = GraphBuilder(ctx).make_squeeze( + {'data': n, 'axes': [axis]}, name=op_name, return_node=True, shapes=[shape], dtypes=[dtype]) + ctx.insert_node_on_output(squeeze_node, n) + + # split node is 1 rank higher than squeeze nodes + output_shape = ctx.get_shape(node.output[0]) + if output_shape: + split_shape = output_shape[:axis] + [1] + output_shape[axis:] + ctx.set_shape(node.output[0], split_shape) + + +@tf_op("OneHot") +class OneHot: + @classmethod + def version_1(cls, ctx, node, **kwargs): + # until there is no onehot op in onnx, a workaround using gather from eye + indices_name = node.input[0] + indices_shape = ctx.get_shape(indices_name) + if len(indices_shape) != 1: + # TODO: this works for rank=1 but tensorflow supports more than this. + # Same principle should work but we need to implemtn our own eye. 
+ raise ValueError("onehot op: only rank1 is supported") + axis = node.get_attr("axis") + # axis becomes axis for gather + node.set_attr("axis", 0) + depth = node.inputs[1].get_tensor_value() + on_val = node.inputs[2].get_tensor_value(as_list=False) + on = on_val.tolist() + off = node.inputs[3].get_tensor_value() + eye = np.eye(depth, dtype=on_val.dtype) + if on != 0: + eye[eye == 1] = on + eye[eye == 0] = off + else: + eye[eye == 0] = off + eye[eye == 1] = on + + const_name = utils.make_name(node.name) + ctx.make_const(const_name, eye) + # setup gather inputs + ctx.replace_inputs(node, [const_name, indices_name]) + node.type = "Gather" + if axis.i == 0: + # TODO: revisit for rank > 1 + name = utils.make_name(node.name) + transpose_node = ctx.insert_new_node_on_output("Transpose", node.output[0], name) + ctx.copy_shape(node.output[0], transpose_node.output[0]) + + @classmethod + def any_version_after9(cls, opset, ctx, node, **kwargs): + # T output = OneHot(uint8/int32/int64 input, T depth, T on-value, T off-value, @int axis, @dtype) + # tf requires that dtype is same as on-value's and off-value's dtype + # in ONNX, op's schema is (input, depth, value, @int axis), meaning of "value" is [off-value, on-value] + # onnxruntime only supports int64 + output_dtype = ctx.get_dtype(node.input[2]) + if ctx.is_target(constants.TARGET_RS6) \ + and output_dtype not in [onnx_pb.TensorProto.INT64, onnx_pb.TensorProto.INT32]: + logger.warning("unsupported dtype in onnxruntime, onehot-9 can't be used directly") + cls.version_1(ctx, node, **kwargs) + return + + depth = GraphBuilder(ctx).make_unsqueeze({'data': node.input[1], 'axes': [0]}) + on_value = node.input[2] + off_value = node.input[3] + on_value = GraphBuilder(ctx).make_unsqueeze({'data': on_value, 'axes': [0]}) + off_value = GraphBuilder(ctx).make_unsqueeze({'data': off_value, 'axes': [0]}) + off_on_value = ctx.make_node("Concat", [off_value, on_value], attr={"axis": 0}).output[0] + + indices = node.input[0] + if 
ctx.is_target(constants.TARGET_RS6) \ + and ctx.get_dtype(indices) != onnx_pb.TensorProto.INT64: + indices = ctx.make_node("Cast", [indices], attr={"to": onnx_pb.TensorProto.INT64}).output[0] + ctx.replace_input(node, node.input[0], indices, 0) + + if ctx.is_target(constants.TARGET_RS6) \ + and ctx.get_dtype(depth) != onnx_pb.TensorProto.INT64: + depth = ctx.make_node("Cast", [depth], attr={"to": onnx_pb.TensorProto.INT64}).output[0] + ctx.replace_input(node, node.input[1], depth, 1) + + if ctx.is_target(constants.TARGET_RS6) \ + and output_dtype != onnx_pb.TensorProto.INT64: + off_on_value = ctx.make_node("Cast", [off_on_value], attr={"to": onnx_pb.TensorProto.INT64}).output[0] + ctx.replace_input(node, node.input[2], off_on_value, 2) + ctx.remove_input(node, node.input[3], 3) + + if ctx.is_target(constants.TARGET_RS6) \ + and output_dtype != onnx_pb.TensorProto.INT64: + new_node_name = utils.make_name("onehot_output") + new_node = ctx.insert_new_node_on_output("Cast", node.output[0], new_node_name, to=output_dtype) + ctx.set_dtype(new_node.output[0], output_dtype) + ctx.set_shape(new_node.output[0], ctx.get_shape(node.output[0])) + + @classmethod + def version_9(cls, ctx, node, **kwargs): + cls.any_version_after9(9, ctx, node, **kwargs) + + @classmethod + def version_11(cls, ctx, node, **kwargs): + # Opset 11 supports negative axis, but core logic is same + cls.any_version_after9(11, ctx, node, **kwargs) + + @classmethod + def version_13(cls, ctx, node, **kwargs): + # Parameters moved to inputs for operator Squeeze, Unsqueeze. 
+ cls.any_version_after9(13, ctx, node, **kwargs) + + +@tf_op("Shape") +class Shape: + @classmethod + def version_1(cls, ctx, node, **kwargs): + # out_type output = Shape(T input, @int32|int64 out_type), out_type by default int32 + # int64 output = Shape(T input) + dtype = ctx.get_dtype(node.output[0]) + if dtype == onnx_pb.TensorProto.INT64: + return + op_name = utils.make_name(node.name) + output_cast = ctx.insert_new_node_on_output("Cast", node.output[0], name=op_name, to=dtype) + ctx.set_dtype(output_cast.output[0], dtype) + ctx.copy_shape(node.output[0], output_cast.output[0]) + + +@tf_op("IsNan", onnx_op="IsNaN") +class IsNan: + @classmethod + def version_9(cls, ctx, node, **kwargs): + pass + + +@tf_op("BatchToSpaceND", onnx_op="DepthToSpace") +class BatchToSpace: + @classmethod + def any_version(cls, opset, ctx, node, **kwargs): + # block_shape impacts Transpose 'perm' attribute values. + # must be available at compile time + utils.make_sure(node.inputs[1].is_const(), 'only support constant block_shape value.') + + block_shape = node.inputs[1].get_tensor_value(False) + blocklen = len(block_shape) + xlen = len(ctx.get_shape(node.input[0])) + + # if 3d or 4d tensor & square 2d block_shape , can optimize + cond1 = xlen in [3, 4] + cond2 = blocklen == 2 and block_shape[0] == block_shape[1] + if cond1 and cond2: + # https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/batch-to-space-n-d.html + # the above link says the data format of input tensor should be (batch, spatial_shape, remaining_shape) + # and we only support 3D and 4D here, and the data format is NHC and NHWC + # onnx op "DepthToSpace" does the same work on input tensor except that it works on "C", + # and it only supports NCHW + # T out = BatchToSpaceND(T input, int32 block_shape, int32 crops) + input_tensor = node.inputs[0] + input_shape = ctx.get_shape(input_tensor.output[0]) + + if len(input_shape) == 3: + # insert automatically an Unsqueeze op if the input is 3d + unsqz1 = 
GraphBuilder(ctx).make_unsqueeze( + {"axes": [3], "data": input_tensor.output[0]}, return_node=True) + # NHWC TO CNHW, so onnx op will work on "N" which is the same as tensorflow + trans1 = ctx.make_node("Transpose", unsqz1.output, {"perm": [3, 0, 1, 2]}) + else: + # Add explicit NHWC_TO_NCHW transpose before and NCHW_TO_NHWC transpose after subgraph. + # That enables more optimizations in TransposeOptimizer. + trans_nchw = ctx.make_node("Transpose", input_tensor.output, {"perm": NHWC_TO_NCHW}) + # NCHW TO CNHW + trans1 = ctx.make_node("Transpose", trans_nchw.output, {"perm": [1, 0, 2, 3]}) + reorganize_node = ctx.make_node(node.type, trans1.output, attr={"blocksize": block_shape[0]}) + + # implement crop logic, the data format is NCHW + slice_axis = [2, 3] + if node.inputs[2].is_const(): + crops = node.inputs[2].get_tensor_value() + top, bottom = crops[0] + left, right = crops[1] + starts = [top, left] + ends = [] + for end in [bottom, right]: + if end != 0: + ends.append(-end) + else: + ends.append(np.iinfo(np.int32).max) + attr = {"axes": slice_axis, "ends": ends, "starts": starts} + else: + shape = ctx.make_const(name=utils.make_name("shape"), np_val=np.array([-1], dtype=np.int64)) + reshape = ctx.make_node("Cast", + ctx.make_node("Reshape", inputs=[node.input[2], shape.output[0]]).output, + attr={"to": utils.map_numpy_to_onnx_dtype(np.int64)}) + crops = ctx.make_node("Split", inputs=reshape.output, attr={}, output_count=4).output + zero = ctx.make_const(name=utils.make_name("zero"), np_val=np.array([0], dtype=np.int64)).output[0] + int32_max = ctx.make_const(name=utils.make_name("int32_max"), + np_val=np.array([np.iinfo(np.int32).max], dtype=np.int64)).output[0] + def crop_to_end(crop): + eq = ctx.make_node("Equal", [crop, zero]) + not_eq = ctx.make_node("Not", eq.output) + cast_eq = ctx.make_node("Cast", eq.output, attr={"to": utils.map_numpy_to_onnx_dtype(np.int64)}) + cast_not_eq = ctx.make_node("Cast", not_eq.output, + attr={"to": 
utils.map_numpy_to_onnx_dtype(np.int64)}) + neg = ctx.make_node("Neg", cast_not_eq.output) + add = ctx.make_node("Add", + [ + ctx.make_node("Mul", [crop, neg.output[0]]).output[0], + ctx.make_node("Mul", [int32_max, cast_eq.output[0]]).output[0], + ]) + return add.output[0] + + starts = ctx.make_node("Concat", [crops[0], crops[2]], {'axis': 0}) + ends = ctx.make_node("Concat", [crop_to_end(crops[1]), crop_to_end(crops[3])], {'axis': 0}) + axes = ctx.make_const(name=utils.make_name("axes"), np_val=np.array(slice_axis, dtype=np.int64)) + attr = {"axes": axes.output[0], "ends": ends.output[0], "starts": starts.output[0]} + inputs_map = {"data": reorganize_node.output[0], **attr} + dtypes = node.output_dtypes + shapes = node.output_shapes + + ctx.remove_node(node.name) + if len(input_shape) == 3: + # add a squeeze op to convert output into 3d + kwargs = {**inputs_map} + node_slice = GraphBuilder(ctx).make_slice(kwargs) + # CNHW TO NHWC + trans2 = ctx.make_node("Transpose", [node_slice], {"perm": [1, 2, 3, 0]}) + GraphBuilder(ctx).make_squeeze({"axes": [3], "data": trans2.output[0], "outputs": node.output}, + name=node.name, shapes=shapes, dtypes=dtypes) + else: + node_slice = GraphBuilder(ctx).make_slice(inputs_map) + # CNHW TO NCHW + trans2 = ctx.make_node("Transpose", [node_slice], {"perm": [1, 0, 2, 3]}) + ctx.make_node("Transpose", trans2.output, {"perm": NCHW_TO_NHWC}, + name=node.name, outputs=node.output, shapes=shapes, dtypes=dtypes) + else: + def mknode(optype, inputs, attrs=None): + nodename = utils.make_name(node.name + '_' + optype.lower()) + if opset >= 13 and optype == 'Squeeze': + return GraphBuilder(ctx).make_squeeze( + {"axes": attrs['axes'], "data": inputs[0]}, name=nodename, return_node=True) + return ctx.make_node(optype, inputs, attrs, name=nodename) + + + # support non 3D/4D tensors and dynamic crop vals + # dynamic slice starts at opset 10 + utils.make_sure(ctx.opset >= 11, 'non-4D tensor or non-const crops require opset 11') + + input0 = 
node.input[0] + input2 = node.input[2] + + # const vals + int_max_const, one_const, minus1_const, blocklen_resize_const, \ + blocklenplus1_const, block_shape_const = \ + [n.output[0] for n in ctx.make_consts([[utils.get_max_value(np.int64)], [1], [-1],\ + [-1, blocklen], [blocklen + 1], block_shape])] + + x_shape = ctx.insert_new_node_on_input(node, 'Shape', node.input[0]) + + # get the spatial and depth (i.e remaining) dimensions + # compute target spatial dimensions by multiplying block_shape + spatial = mknode('Slice', [x_shape.output[0], one_const, blocklenplus1_const]) + depth = mknode('Slice', [x_shape.output[0], blocklenplus1_const, int_max_const]) + target_spatial = mknode('Mul', [spatial.output[0], block_shape_const]) + + # shape to use before shuffle (part 1) + ccat1 = mknode('Concat', [spatial.output[0], block_shape_const], {'axis': 0}) + re1 = mknode('Reshape', [ccat1.output[0], blocklen_resize_const]) + tr1 = mknode('Transpose', [re1.output[0]]) + interleave = mknode('Reshape', [tr1.output[0], minus1_const]) + shape1 = mknode('Concat', [minus1_const, interleave.output[0], depth.output[0]], {'axis': 0}) + + # shape to use before shuffle (part 2) + g1 = list(range(2, 2 * blocklen + 1, 2)) + g2 = list(range(1, 2 * blocklen + 1, 2)) + g = g1 + [0] + g2 + list(range(0, xlen + blocklen)[1 + 2 * blocklen:]) + + # permutation values for shuffling + p = np.asarray(range(0, xlen + blocklen)) + p[0] = blocklen + p[1] = blocklen + 1 + p[2] = 0 + for i in range(3, blocklen * 2 + 1): + p[i] = p[i - 2] + 1 + + # reshape to create moving blocks, shuffle, and reshape to target_spatial + indices = ctx.make_consts([list(g)])[0].output[0] + gather = mknode('Gather', [shape1.output[0], indices]) + x2 = mknode('Reshape', [input0, gather.output[0]]) + tr2 = mknode('Transpose', [x2.output[0]], {'perm': np.array(p)}) + shape2 = mknode('Concat', [minus1_const, target_spatial.output[0], depth.output[0]], {'axis': 0}) + x3 = mknode('Reshape', [tr2.output[0], shape2.output[0]]) + 
+ # crop axes + slice_starts_const1, slice_starts_const2, slice_ends_const1, \ + slice_ends_const2, axes_const = \ + [n.output[0] for n in ctx.make_consts([[0, 0], [1, utils.get_max_value(np.int64)], [1, 0],\ + [2, utils.get_max_value(np.int64)], range(1, blocklen + 1)])] + + crop = mknode('Cast', [input2], {'to': TensorProto.INT64}) + crop_transposed = mknode('Transpose', [crop.output[0]]) + crop_starts = mknode('Slice', [crop_transposed.output[0], slice_starts_const1, slice_starts_const2]) + crop_ends = mknode('Slice', [crop_transposed.output[0], slice_ends_const1, slice_ends_const2]) + crop_starts_squeeze = mknode('Squeeze', [crop_starts.output[0]], {'axes': [0]}) + crop_ends_squeeze = mknode('Squeeze', [crop_ends.output[0]], {'axes': [0]}) + end_range = mknode('Sub', [target_spatial.output[0], crop_ends_squeeze.output[0]]) + orig_shape = node.output_shapes + orig_dtypes = node.output_dtypes + ctx.remove_node(node.name) + ctx.make_node('Slice', [x3.output[0], crop_starts_squeeze.output[0], end_range.output[0], axes_const], + name=node.name, outputs=node.output, shapes=orig_shape, dtypes=orig_dtypes) + + @classmethod + def version_1(cls, ctx, node, **kwargs): + cls.any_version(1, ctx, node, **kwargs) + + @classmethod + def version_13(cls, ctx, node, **kwargs): + # Parameters moved to inputs for operator Squeeze, Unsqueeze. + cls.any_version(13, ctx, node, **kwargs) + + +@tf_op("SpaceToBatchND", onnx_op="SpaceToDepth") +class SpaceToBatch: + @classmethod + def version_1(cls, ctx, node, **kwargs): + # block_shape impacts Transpose 'perm' attribute values. 
+ # must be available at compile time + utils.make_sure(node.inputs[1].is_const(), 'only support constant block_shape value.') + + block_shape = node.inputs[1].get_tensor_value(False) + blocklen = len(block_shape) + xlen = len(ctx.get_shape(node.input[0])) + + # if 3d or 4d tensor & square 2d block_shape , can optimize + cond1 = xlen in [3, 4] + # with opset 11 (or above), we can deal with non-const pads + # by creating a subgraph with Split and Concat and pass its output + # to Pad's second input + cond2 = node.inputs[2].is_const() or ctx.opset >= 11 + cond3 = blocklen == 2 and block_shape[0] == block_shape[1] + if cond1 and cond2 and cond3: + # https://www.tensorflow.org/api_docs/python/tf/space_to_batch_nd + # the above link says the data format of input tensor should be (batch, spatial_shape, remaining_shape) + # and we only support 4D here, so the data format is NHWC + # onnx op "SpaceToDepth" does the same work on input tensor except that it works on "C", + # and it only supports NCHW + # T out = SpaceToBatchND(T input, int32 block_shape, int32 crops) + input_tensor = node.inputs[0] + input_shape = ctx.get_shape(input_tensor.output[0]) + shapes = [ctx.get_shape(node.output[0])] + dtypes = [ctx.get_dtype(node.output[0])] + + if len(input_shape) == 3: + # insert automatically an Unsqueeze op if the input is 3d + unsqz1 = GraphBuilder(ctx).make_unsqueeze( + {"axes": [3], "data": input_tensor.output[0]}, return_node=True) + # NHWC TO CNHW + trans1 = ctx.make_node("Transpose", unsqz1.output, {"perm": [3, 0, 1, 2]}) + else: + # Add explicit NHWC_TO_NCHW transpose before and NCHW_TO_NHWC transpose after subgraph. + # That enables more optimizations in TransposeOptimizer. 
+ trans_nchw = ctx.make_node("Transpose", input_tensor.output, {"perm": NHWC_TO_NCHW}) + # NCHW TO CNHW + trans1 = ctx.make_node("Transpose", trans_nchw.output, {"perm": [1, 0, 2, 3]}) + # implement pads logic, the data format is NCHW + if ctx.opset <= 10 or node.inputs[2].is_const(): + paddings = node.inputs[2].get_tensor_value() + top, bottom = paddings[0] + left, right = paddings[1] + pads = [0, 0, top, left, + 0, 0, bottom, right] + if ctx.opset <= 10: + pad_op = ctx.make_node("Pad", trans1.output, attr={"pads": pads}) + else: + new_pads = ctx.make_const(name=utils.make_name("pads"), np_val=np.array(pads, dtype=np.int64)) + pad_op = ctx.make_node("Pad", [trans1.output[0], new_pads.output[0]]) + else: + # TODO: we should be able to support dynamic input here. + shape = ctx.make_const(name=utils.make_name("shape"), np_val=np.array([-1], dtype=np.int64)) + reshape = ctx.make_node("Reshape", inputs=[node.input[2], shape.output[0]]) + cast = ctx.make_node("Cast", reshape.output, attr={'to': utils.map_numpy_to_onnx_dtype(np.int64)}) + split = ctx.make_node("Split", inputs=cast.output, attr={}, output_count=4) + pads = split.output + zero = ctx.make_const(name=utils.make_name("zero"), np_val=np.array([0], dtype=np.int64)).output[0] + new_pads = ctx.make_node("Concat", [zero, zero, pads[0], pads[2], zero, zero, pads[1], pads[3]], + {'axis': 0}) + pad_op = ctx.make_node("Pad", [trans1.output[0], new_pads.output[0]]) + + reorganize_node = ctx.make_node(node.type, pad_op.output, attr={"blocksize": block_shape[0]}) + + ctx.remove_node(node.name) + if len(input_shape) == 3: + # CNHW TO NHWC + trans2 = ctx.make_node("Transpose", reorganize_node.output, {"perm": [1, 2, 3, 0]}) + # add a squeeze op to convert output into 3d + GraphBuilder(ctx).make_squeeze({"axes": [3], "data": trans2.output[0], "outputs": node.output}, + name=node.name, shapes=shapes, dtypes=dtypes) + else: + # CNHW TO NCHW + trans2 = ctx.make_node("Transpose", reorganize_node.output, {"perm": [1, 0, 2, 3]}) 
+ ctx.make_node("Transpose", trans2.output, {"perm": NCHW_TO_NHWC}, + name=node.name, outputs=node.output, shapes=shapes, dtypes=dtypes) + else: + def mknode(optype, inputs, attrs=None): + nodename = utils.make_name(node.name + '_' + optype.lower()) + return ctx.make_node(optype, inputs, attrs, name=nodename) + + # support non 3D/4D tensors and dynamic pad vals + # dynamic slice starts at opset 10 + utils.make_sure(ctx.opset >= 11, 'non-4D tensor or non-const pads require opset 11') + + input0 = node.input[0] + input2 = node.input[2] + + # const vals + int_max_const, zero_const, one_const, minus1_const, blocklen_resize_const, \ + blocklenplus1_const, filltop_const, fillbottom_const, block_shape_const = \ + [n.output[0] for n in ctx.make_consts([[utils.get_max_value(np.int64)], [0], [1],\ + [-1], [-1, blocklen], [blocklen + 1],\ + [1, 0, 0, 0], [0, 0, 1, 0], block_shape])] + + x_shape = ctx.insert_new_node_on_input(node, 'Shape', node.input[0]) + x_rank = mknode('Size', [x_shape.output[0]]) + + # pad x prior to compute + pad = mknode('Cast', [input2], {'to': TensorProto.INT64}) + pad_shape = mknode('Shape', [pad.output[0]]) + pad_rank = mknode('Slice', [pad_shape.output[0], zero_const, one_const]) + pad_gap = mknode('Sub', [x_rank.output[0], pad_rank.output[0]]) + gapminus1 = mknode('Sub', [pad_gap.output[0], one_const]) + gapminus1fillbot = mknode('Mul', [fillbottom_const, gapminus1.output[0]]) + padfilltop = mknode('Pad', [pad.output[0], filltop_const]) + padfilltopbottom = mknode('Pad', [padfilltop.output[0], gapminus1fillbot.output[0]]) + pad_t = mknode('Transpose', [padfilltopbottom.output[0]]) + pad1d = mknode('Reshape', [pad_t.output[0], minus1_const]) + + # get the spatial and depth (i.e remaining) dimensions + # compute reduced spatial dimensions by dividing block_shape + x1 = mknode('Pad', [input0, pad1d.output[0]]) + x1_shape = mknode('Shape', [x1.output[0]]) + spatial = mknode('Slice', [x1_shape.output[0], one_const, blocklenplus1_const]) + depth = 
        # (continuation of a block-reshuffle handler that begins above this chunk;
        # `mknode`, `x1`, `x1_shape`, `spatial`, `depth`, the *_const names, `blocklen`
        # and `xlen` are defined in the unseen part of the method)
        mknode('Slice', [x1_shape.output[0], blocklenplus1_const, int_max_const])
        reduced = mknode('Div', [spatial.output[0], block_shape_const])

        # reshape x into smaller blocks before shuffle
        ccat1 = mknode('Concat', [reduced.output[0], block_shape_const], {'axis': 0})
        reshape1 = mknode('Reshape', [ccat1.output[0], blocklen_resize_const])
        tr1 = mknode('Transpose', [reshape1.output[0]])
        interleave = mknode('Reshape', [tr1.output[0], minus1_const])
        shape1 = mknode('Concat', [minus1_const, interleave.output[0], depth.output[0]], {'axis': 0})
        x2 = mknode('Reshape', [x1.output[0], shape1.output[0]])

        # permutation values for shuffling
        p1 = list(range(2, 2 * blocklen + 1, 2))
        p2 = list(range(1, 2 * blocklen + 1, 2))
        perm = p1 + [0] + p2 + list(range(0, xlen + blocklen)[1 + 2 * blocklen:])

        tr2 = mknode('Transpose', [x2.output[0]], {'perm': perm})
        shape2 = mknode('Concat', [minus1_const, reduced.output[0], depth.output[0]], {'axis': 0})
        # Preserve the original node's output metadata, then replace the node
        # with the final Reshape so downstream consumers keep the same tensor name.
        orig_shape = node.output_shapes
        orig_dtypes = node.output_dtypes
        ctx.remove_node(node.name)
        ctx.make_node('Reshape', [tr2.output[0], shape2.output[0]],
                      name=node.name, outputs=node.output, shapes=orig_shape,
                      dtypes=orig_dtypes)


@tf_op("IsInf", onnx_op="IsInf")
class IsInf:
    """tf.IsInf maps 1:1 onto ONNX IsInf; only validates the input dtype."""
    @classmethod
    def version_10(cls, ctx, node, **kwargs):
        """Reject dtypes ONNX IsInf cannot handle (only float/double are legal)."""
        node_dtype = ctx.get_dtype(node.input[0])
        utils.make_sure(node_dtype, "Dtype of {} is None".format(node.name))
        if node_dtype not in [onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.DOUBLE]:
            raise ValueError("dtype " + str(node_dtype) + " is not supported in onnx for now")


@tf_op(["NonMaxSuppressionV2", "NonMaxSuppressionV3", "NonMaxSuppressionV4", "NonMaxSuppressionV5"])
class NonMaxSuppression:
    """Convert the tf NonMaxSuppression family to ONNX NonMaxSuppression.

    The core trick: tf works on un-batched 2D boxes / 1D scores, ONNX expects
    batched 3D boxes / 3D scores, so the inputs are unsqueezed into a
    single-batch / single-class layout and the ONNX [batch, class, box] index
    triples are sliced back down to tf's flat box indices.
    """
    @classmethod
    def any_version(cls, opset, ctx, node, **kwargs):
        """Shared lowering for opsets 10/11/13 (GraphBuilder hides the
        squeeze/unsqueeze attr-vs-input difference across opsets)."""
        # int32 = NonMaxSuppressionV2(T boxes, T scores, int32 max_output_size, T iou_threshold, T score_threshold)
        # int64 = NonMaxSuppression(T boxes, T scores, int64 max_output_size, T iou_threshold, T score_threshold),
        # T means float32 here, the last 3 params are optional
        # tf boxes is 2D ([boxes_num, 4]) while onnx is 3D ([num_batches, boxes_num, 4])
        # tf scores is 1D ([boxes_num])while onnx is 2D ([num_batches, num_classes, boxes_num])
        # onnx output is [num_selected_boxes, 3], the meaning of last dim is [batch_index, class_index, box_index]
        # while tf's output is [num_selected_boxes]

        # NonMaxSuppressionV2, NonMaxSuppressionV3 return selected_indices
        # NonMaxSuppressionV4 returns selected_indices, valid_outputs
        # NonMaxSuppressionV5 returns selected_indices, selected_scores, valid_outputs

        needs_padding = "pad_to_max_output_size" in node.attr and node.attr["pad_to_max_output_size"].i == 1
        gb = GraphBuilder(ctx)
        # Add the batch (and class) dims tf lacks: boxes -> [1, N, 4], scores -> [1, 1, N].
        input_score0 = gb.make_unsqueeze({'data': node.input[0], 'axes': [0]}, return_node=True)
        input_score1 = gb.make_unsqueeze({'data': node.input[1], 'axes': [0, 1]}, return_node=True)
        ctx.replace_input(node, node.input[0], input_score0.output[0], 0)
        ctx.replace_input(node, node.input[1], input_score1.output[0], 1)
        # Kept so V5 can Gather the original (pre-unsqueeze) scores later via .input[0].
        input_score = input_score1

        # ONNX wants int64 max_output_size; tf supplies int32.
        ctx.insert_new_node_on_input(node, "Cast", node.input[2], to=onnx_pb.TensorProto.INT64)
        # replace original node with nonmaxsurppress + slice + squeeze + cast
        dtypes = [[ctx.get_dtype(output)] for output in node.output]
        shapes = [[ctx.get_shape(output)] for output in node.output]
        max_output_size = node.input[2]
        # V5's soft-NMS sigma has no ONNX equivalent; only sigma == 0 converts exactly.
        utils.make_sure(len(node.inputs) <= 5 or int(node.inputs[5].get_tensor_value(False)) == 0,
                        "soft_nms_sigma must be 0")
        ctx.remove_node(node.name)
        new_nonmaxsurppress = ctx.make_node("NonMaxSuppression", node.input[: 5]).output[0]
        # Keep only column 2 (box_index) of the [batch, class, box] triples,
        # then squeeze to tf's 1-D index vector.
        slice_op = GraphBuilder(ctx).make_slice({"data": new_nonmaxsurppress,
                                                 "axes": [1], "ends": [3], "starts": [2]})
        nms_output = GraphBuilder(ctx).make_squeeze({'data': slice_op, "axes": [1]}, return_node=True)
        original_nms_output = nms_output
        if node.type in ["NonMaxSuppressionV4", "NonMaxSuppressionV5"]:
            # add valid_outputs count
            output_idx = 2 if node.type in ["NonMaxSuppressionV5"] else 1
            shape_op = ctx.make_node("Shape", inputs=[nms_output.output[0]])
            reduce_op = GraphBuilder(ctx).make_reduce_sum(
                {"data": shape_op.output[0], "axes": [0], "keepdims": 0, "noop_with_empty_axes": 1})
            ctx.make_node("Cast", inputs=[reduce_op], attr={"to": onnx_pb.TensorProto.INT32},
                          outputs=[node.output[output_idx]], dtypes=dtypes[output_idx], shapes=shapes[output_idx],
                          op_name_scope=node.name)

            pad_amt = None
            if needs_padding:
                # pad_amt might be shared between selected_indices, selected_scores
                # pad_amt = max(0, max_output_size - num_selected); Relu clamps the negative case.
                sub_op = ctx.make_node("Sub", inputs=[max_output_size, shape_op.output[0]])
                raw_pad_float = ctx.make_node("Cast", inputs=[sub_op.output[0]], attr={"to": onnx_pb.TensorProto.FLOAT})
                relu_op = ctx.make_node("Relu", inputs=[raw_pad_float.output[0]])
                pad_amt = ctx.make_node("Cast", inputs=[relu_op.output[0]], attr={"to": onnx_pb.TensorProto.INT64})
                #
                # pad selected_indices
                #
                if ctx.opset <= 10:  # Dynamic padding not supported before opset 11
                    zero_tensor = helper.make_tensor("value", onnx_pb.TensorProto.INT64, dims=[1], vals=[0])
                    padding = ctx.make_node("ConstantOfShape", inputs=[pad_amt.output[0]], attr={"value": zero_tensor})
                    pad_op = ctx.make_node("Concat", inputs=[nms_output.output[0], padding.output[0]], attr={'axis': 0})
                else:
                    const_zero = ctx.make_const(utils.make_name("const_zero"), np.array([0], dtype=np.int64))
                    pad_val = ctx.make_node("Concat", inputs=[const_zero.output[0], pad_amt.output[0]], attr={'axis': 0})
                    pad_op = ctx.make_node("Pad", inputs=[nms_output.output[0], pad_val.output[0]])
                nms_output = pad_op

            if node.type in ["NonMaxSuppressionV5"]:
                if needs_padding:
                    # add selected_scores with padding
                    # input_score.input[0] is the original 1-D tf scores tensor.
                    gather_op = ctx.make_node("Gather", inputs=[input_score.input[0], original_nms_output.output[0]],
                                              dtypes=dtypes[1], shapes=shapes[1])
                    if ctx.opset <= 10:  # Dynamic padding not supported before opset 11
                        # NOTE(review): dtypes[1] is a one-element list ([[dtype] for ...] above),
                        # but helper.make_tensor expects a scalar dtype enum — looks like
                        # dtypes[1][0] was intended; verify on the opset<=10 V5 padding path.
                        zero_tensor = helper.make_tensor("value", dtypes[1], dims=[1], vals=[0])
                        padding = ctx.make_node("ConstantOfShape", inputs=[pad_amt.output[0]], attr={"value": zero_tensor})
                        pad_op = ctx.make_node("Concat", inputs=[gather_op.output[0], padding.output[0]],
                                               outputs=[node.output[1]], dtypes=dtypes[1], shapes=shapes[1],
                                               attr={'axis': 0})
                    else:
                        const_zero = ctx.make_const(utils.make_name("const_zero"), np.array([0], dtype=np.int64))
                        pad_val = ctx.make_node("Concat", inputs=[const_zero.output[0], pad_amt.output[0]],
                                                attr={'axis': 0})
                        pad_op = ctx.make_node("Pad", inputs=[gather_op.output[0], pad_val.output[0]],
                                               outputs=[node.output[1]], dtypes=dtypes[1], shapes=shapes[1])
                else:
                    # add selected_scores without padding
                    ctx.make_node("Gather", inputs=[input_score.input[0], nms_output.output[0]],
                                  outputs=[node.output[1]], dtypes=dtypes[1], shapes=shapes[1])

        # cast selected_indices back to int32
        ctx.make_node("Cast", inputs=nms_output.output, attr={"to": onnx_pb.TensorProto.INT32},
                      outputs=[node.output[0]], dtypes=dtypes[0], shapes=shapes[0])

    @classmethod
    def version_10(cls, ctx, node, **kwargs):
        cls.any_version(10, ctx, node, **kwargs)

    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # no change
        cls.any_version(11, ctx, node, **kwargs)

    @classmethod
    def version_13(cls, ctx, node, **kwargs):
        # Parameters moved to inputs for operator Squeeze, Unsqueeze.
        cls.any_version(13, ctx, node, **kwargs)


@tf_op(["CombinedNonMaxSuppression"])
class CombinedNonMaxSuppression:
    """Lower tf CombinedNonMaxSuppression to an ONNX subgraph.

    Runs a single ONNX NonMaxSuppression, then scatters the selections into a
    per-batch grid, takes TopK per batch, and gathers boxes/classes/scores back
    out with padding so each of the 4 tf outputs has its fixed shape.
    Only the boxes-shared-across-classes layout (boxes dim 2 == 1) is handled.
    """
    @classmethod
    def version_10(cls, ctx, node, **kwargs):
        # boxes.shape = [batch_size, num_boxes, (1 OR num_classes), 4]
        # scores.shape = [batch_size, num_boxes, num_classes]
        boxes, scores, max_per_class, max_total_size, iou_threshold, score_threshold = node.input

        max_per_class = ctx.make_node("Cast", [max_per_class], attr={'to': TensorProto.INT64}).output[0]
        max_total_size = ctx.make_node("Cast", [max_total_size], attr={'to': TensorProto.INT64}).output[0]

        pad_per_class = node.get_attr_value("pad_per_class", False)
        clip_boxes = node.get_attr_value("clip_boxes", True)
        shape = ctx.get_shape(boxes)
        share_boxes_across_classes = shape is not None and shape[2] == 1
        utils.make_sure(share_boxes_across_classes,
                        "CombinedNonMaxSuppression only currently implemented for boxes shared across classes.")

        scores_shape = ctx.make_node("Shape", [scores]).output[0]
        # value: [batch_size]
        batch_size = GraphBuilder(ctx).make_slice({'data': scores_shape, 'starts': [0], 'ends': [1], 'axes': [0]})

        num_classes = GraphBuilder(ctx).make_slice({'data': scores_shape, 'starts': [2], 'ends': [3], 'axes': [0]})
        max_per_class_times_classes = ctx.make_node("Mul", [max_per_class, num_classes]).output[0]

        # Scalar constants reused throughout the subgraph.
        const_zero_float = ctx.make_const(utils.make_name("const_zero"), np.array(0, np.float32)).output[0]
        const_one_float = ctx.make_const(utils.make_name("const_one"), np.array(1, np.float32)).output[0]
        const_zero = ctx.make_const(utils.make_name("const_zero"), np.array(0, np.int64)).output[0]
        const_neg_one = ctx.make_const(utils.make_name("const_neg_one"), np.array(-1, np.int64)).output[0]
        const_one = ctx.make_const(utils.make_name("const_one"), np.array(1, np.int64)).output[0]

        # Drop the shared-class dim: boxes -> [batch_size, num_boxes, 4].
        boxes_sq = GraphBuilder(ctx).make_squeeze({'data': boxes, 'axes': [2]})
        # scores_trans.shape = [batch_size, num_classes, num_boxes]
        scores_trans = ctx.make_node("Transpose", [scores], attr={'perm': [0, 2, 1]}).output[0]
        # shape: [num_selected, 3], elts of format [batch_index, class_index, box_index]
        selected_indices = ctx.make_node(
            "NonMaxSuppression", [boxes_sq, scores_trans, max_per_class, iou_threshold, score_threshold],
            op_name_scope=node.name).output[0]
        selected_classes_unsq = GraphBuilder(ctx).make_slice(
            {'data': selected_indices, 'starts': [1], 'ends': [2], 'axes': [1]})
        selected_classes = GraphBuilder(ctx).make_squeeze({'data': selected_classes_unsq, 'axes': [1]})
        # shape: [num_selected]
        selected_scores = ctx.make_node("GatherND", [scores_trans, selected_indices], op_name_scope=node.name).output[0]
        # shape: [num_selected, 1]
        selected_batch_idx = GraphBuilder(ctx).make_slice(
            {'data': selected_indices, 'starts': [0], 'ends': [1], 'axes': [1]})
        selected_box_num = GraphBuilder(ctx).make_slice(
            {'data': selected_indices, 'starts': [2], 'ends': [3], 'axes': [1]})
        combined_box_idx = ctx.make_node("Concat", [selected_batch_idx, selected_box_num], attr={'axis': 1}).output[0]
        selected_boxes_unsq = ctx.make_node("GatherND", [boxes, combined_box_idx], op_name_scope=node.name).output[0]
        # shape: [num_selected, 4]
        selected_boxes = GraphBuilder(ctx).make_squeeze({'data': selected_boxes_unsq, 'axes': [1]})

        clipped_boxes = selected_boxes
        if clip_boxes:
            # Clamp box coordinates to [0, 1], mirroring tf's clip_boxes attr.
            clipped_boxes = ctx.make_node('Max', [clipped_boxes, const_zero_float]).output[0]
            clipped_boxes = ctx.make_node('Min', [clipped_boxes, const_one_float]).output[0]

        # shape: [num_selected]
        batch_idx_sq = GraphBuilder(ctx).make_squeeze({'data': selected_batch_idx, 'axes': [1]})
        # value: [num_selected]
        num_selected = ctx.make_node("Shape", [selected_scores]).output[0]
        num_selected_sq = GraphBuilder(ctx).make_squeeze({'data': num_selected, 'axes': [0]})
        # shape: [num_selected]
        selected_range = ctx.make_node("Range", [const_zero, num_selected_sq, const_one]).output[0]

        # Build a running per-batch position (0, 1, 2, ...) for each selection
        # via a one-hot batch matrix + exclusive CumSum.
        id_shape = ctx.make_node("Concat", [batch_size, batch_size], attr={'axis': 0}).output[0]
        zero_tensor = helper.make_tensor("value", TensorProto.INT64, dims=[1], vals=[0])
        zeros_of_shape = ctx.make_node("ConstantOfShape", [id_shape], attr={"value": zero_tensor}).output[0]
        # shape: [batch_size, batch_size]
        id_matrix = ctx.make_node("EyeLike", [zeros_of_shape]).output[0]
        # shape: [num_selected, batch_size]
        one_hot_batch_idx = ctx.make_node("Gather", [id_matrix, batch_idx_sq], attr={'axis': 0}).output[0]
        cum_batch_idx = ctx.make_node("CumSum", [one_hot_batch_idx, const_zero], {'exclusive': True}).output[0]
        # shape: [num_selected]
        idx_within_batch = ctx.make_node("GatherND", [cum_batch_idx, selected_batch_idx], attr={'batch_dims': 1},
                                         op_name_scope=node.name).output[0]
        idx_within_batch_unsq = GraphBuilder(ctx).make_unsqueeze({'data': idx_within_batch, 'axes': [1]})
        combined_idx = ctx.make_node("Concat", [selected_batch_idx, idx_within_batch_unsq], attr={'axis': 1}).output[0]

        zero_tensor_float = helper.make_tensor("value", TensorProto.FLOAT, dims=[1], vals=[0])
        # NOTE(review): despite the name this tensor is INT64 (it seeds the
        # integer index grid with -1, the "empty slot" marker).
        neg_one_tensor_float = helper.make_tensor("value", TensorProto.INT64, dims=[1], vals=[-1])
        # value: [batch_size, max_per_class_times_classes]
        results_grid_shape = ctx.make_node(
            "Concat", [batch_size, max_per_class_times_classes], attr={'axis': 0}).output[0]
        scores_by_batch_empty = ctx.make_node(
            "ConstantOfShape", [results_grid_shape], attr={"value": zero_tensor_float}).output[0]
        idx_by_batch_empty = ctx.make_node(
            "ConstantOfShape", [results_grid_shape], attr={"value": neg_one_tensor_float}).output[0]

        # Scatter each selection's score / flat index into its [batch, slot] cell.
        scores_by_batch = ctx.make_node("ScatterND", [scores_by_batch_empty, combined_idx, selected_scores]).output[0]
        idx_by_batch = ctx.make_node("ScatterND", [idx_by_batch_empty, combined_idx, selected_range]).output[0]

        k_val = ctx.make_node("Min", [max_total_size, max_per_class_times_classes]).output[0]

        # shape: [batch_size, k_val]
        top_k_vals, top_k_indices = \
            ctx.make_node("TopK", [scores_by_batch, k_val], attr={'axis': 1}, output_count=2).output

        top_k_selected_indices = ctx.make_node("GatherElements", [idx_by_batch, top_k_indices], attr={'axis': 1},
                                               op_name_scope=node.name).output[0]

        # Pad the last axis from k_val up to the fixed output width.
        target_size = max_total_size
        if pad_per_class:
            target_size = k_val

        pad_amt = ctx.make_node("Sub", [target_size, k_val]).output[0]
        pads_const = ctx.make_const(utils.make_name("pad_const"), np.array([0, 0, 0], np.int64)).output[0]
        pads = ctx.make_node("Concat", [pads_const, pad_amt], attr={'axis': 0}).output[0]

        top_scores_pad = ctx.make_node("Pad", [top_k_vals, pads, const_zero_float]).output[0]
        top_indices_pad = ctx.make_node("Pad", [top_k_selected_indices, pads, const_neg_one]).output[0]
        # Shift indices by +1 so the -1 "empty" marker maps to row 0 of the
        # dummy-prefixed gather tables built below.
        top_indices_increment = ctx.make_node("Add", [top_indices_pad, const_one]).output[0]

        # Entries still -1 never received a selection; count the real ones per batch.
        valid_indices = ctx.make_node("Greater", [top_k_selected_indices, const_neg_one]).output[0]
        valid_indices_int = ctx.make_node("Cast", [valid_indices], attr={'to': TensorProto.INT32}).output[0]
        # shape: [batch_size]
        valid_indices_cnt = GraphBuilder(ctx).make_reduce_sum(
            {"data": valid_indices_int, "axes": [-1], "keepdims": 0, "noop_with_empty_axes": 1})

        # Prepend one dummy row so shifted index 0 gathers zeros.
        box_pads = ctx.make_const(utils.make_name("pad_const"), np.array([1, 0, 0, 0], np.int64)).output[0]
        class_pads = ctx.make_const(utils.make_name("pad_const"), np.array([1, 0], np.int64)).output[0]
        clipped_boxes_pad = ctx.make_node("Pad", [clipped_boxes, box_pads, const_zero_float]).output[0]
        selected_classes_pad = ctx.make_node("Pad", [selected_classes, class_pads, const_zero]).output[0]
        nmsed_boxes = ctx.make_node("Gather", [clipped_boxes_pad, top_indices_increment], attr={'axis': 0},
                                    op_name_scope=node.name).output[0]
        nmsed_classes = ctx.make_node("Gather", [selected_classes_pad, top_indices_increment], attr={'axis': 0},
                                      op_name_scope=node.name).output[0]
        nmsed_classes_float = ctx.make_node("Cast", [nmsed_classes], attr={'to': TensorProto.FLOAT}).output[0]

        # Wire the 4 tf outputs (boxes, scores, classes, valid counts) and drop the original node.
        ctx.replace_all_inputs(node.output[0], nmsed_boxes)
        ctx.replace_all_inputs(node.output[1], top_scores_pad)
        ctx.replace_all_inputs(node.output[2], nmsed_classes_float)
        ctx.replace_all_inputs(node.output[3], valid_indices_cnt)
        ctx.remove_node(node.name)


@tf_op("ReverseSequence")
class ReverseSequence:
    """Convert tf ReverseSequence: via a reversed Scan (opset 8), unsupported
    (opset 9), or the native ONNX ReverseSequence op (opset 10+)."""
    @classmethod
    def version_8(cls, ctx, node, **kwargs):
        """Emulate ReverseSequence with a Scan whose single scan input is read
        in reverse (directions=[1]); time-major inputs are transposed to
        batch-major first and transposed back afterwards."""
        # T output = ReverseSequence(T input, int32|int64 seq_lengths, @int seq_dim, @int batch_dim)
        # T output = Scan(int64 sequence_lens, variadic initial_state_and_scan_inputs, @graph body,
        # @ints directions,@int num_scan_inputs)
        seq_dim = node.get_attr("seq_dim")
        batch_dim = node.get_attr("batch_dim")
        # NOTE(review): `(batch_dim or batch_dim.i == 0)` reads oddly — if
        # batch_dim is None the right operand raises AttributeError; presumably
        # "batch_dim is None or batch_dim.i == 0" was meant. Verify against callers.
        batch_major = seq_dim.i == 1 and (batch_dim or batch_dim.i == 0)
        time_major = batch_dim.i == 1 and (seq_dim or seq_dim.i == 0)
        perm_val = None

        if not batch_major and not time_major:
            error_msg = "unsupported attributes, seq_dim:{}, batch_dim:{}".format(seq_dim, batch_dim)
            raise ValueError(error_msg)

        if time_major:
            # Swap axes 0 and 1 so the sequence axis becomes the scan axis.
            old_shape = ctx.get_shape(node.input[0])
            old_dtype = ctx.get_dtype(node.input[0])
            perm_val = [1, 0]
            rank = len(old_shape)
            utils.make_sure(rank >= 2, "rank of reverse_sequence input {} is at least 2".format(node.input[0]))
            perm_val += list(range(2, rank))
            trans_node = ctx.insert_new_node_on_input(node, "Transpose", node.input[0], perm=perm_val)
            new_shape = nn.spatial_map(old_shape, perm_val)
            ctx.set_shape(trans_node.output[0], new_shape)
            ctx.set_dtype(trans_node.output[0], old_dtype)

        # handle batch_major input
        node.type = "Scan"
        node.set_attr("num_scan_inputs", 1)
        input_dtype = ctx.get_dtype(node.input[0])
        input_shape = ctx.get_shape(node.input[0])

        # Scan body is a pure Identity: reversal comes from directions=[1] below.
        g = ctx.create_new_graph_with_same_config()
        g.parent_graph = ctx
        g.add_graph_input('X', input_dtype, input_shape[2:])
        g.make_node('Identity', ['X'], outputs=['Y'])
        g.add_graph_output('Y', input_dtype, input_shape[2:])

        node.set_body_graph_as_attr("body", g)
        node.set_attr("directions", [1])  # reverse the scan input

        # Scan requires int64 sequence lengths.
        seq_len_dtype = ctx.get_dtype(node.input[1])
        if seq_len_dtype != onnx_pb.TensorProto.INT64:
            cast_node = ctx.insert_new_node_on_input(node, "Cast", node.input[1], to=onnx_pb.TensorProto.INT64)
            ctx.set_dtype(cast_node.output[0], onnx_pb.TensorProto.INT64)
            ctx.copy_shape(node.input[1], cast_node.output[0])

        if time_major:
            # get back to time_major
            op_name = utils.make_name(node.name)
            trans_back_node = ctx.insert_new_node_on_output("Transpose", node.output[0],
                                                            name=op_name, perm=perm_val)
            ctx.copy_dtype(node.output[0], trans_back_node.output[0])

        # Scan expects sequence_lens first, then the scan input — swap inputs 0 and 1.
        tmp = node.input[0]
        ctx.replace_input(node, node.input[0], node.input[1], 0)
        ctx.replace_input(node, node.input[1], tmp, 1)

    @classmethod
    def version_9(cls, ctx, node, **kwargs):
        # T output = ReverseSequence(T input, int32|int64 seq_lengths, @int seq_dim, @int batch_dim)
        # we cannot easily construct reverse_sequence equivalence in opset 9, so we will not support it
        # here. Actually using loops to do that is kind of meaningless since there will be performance
        # issue there for sure.
        raise NotImplementedError("ReverseSequence is not supported to convert in OPSET 9,"
                                  " if possible please try using OPSET 8, or OPSET >=10 instead.")

    @classmethod
    def version_10(cls, ctx, node, **kwargs):
        """Map directly onto ONNX ReverseSequence; only attribute names and the
        seq_lengths dtype differ."""
        # T output = ReverseSequence(T input, int32|int64 seq_lengths, @int seq_dim, @int batch_dim)
        # T output = ReverseSequence(T input, int64 sequence_lens, @int time_axis, @int batch_axis)
        seq_dim = node.get_attr("seq_dim")
        utils.make_sure(seq_dim is not None, "sequence dim must be given in {}".format(node.name))
        seq_dim = seq_dim.i
        batch_dim = node.get_attr_value("batch_dim", 0)

        ctx.remove_node(node.name)
        node = ctx.make_node(
            "ReverseSequence",
            node.input,
            outputs=node.output,
            attr={"batch_axis": batch_dim, "time_axis": seq_dim})

        seq_len_dtype = ctx.get_dtype(node.input[1])
        utils.make_sure(seq_len_dtype is not None, "dtype of {} is None".format(node.input[1]))
        target_dtype = TensorProto.INT64
        if seq_len_dtype != target_dtype:
            ctx.insert_new_node_on_input(node, "Cast", node.input[1], to=target_dtype)


@tf_op("ReverseV2")
class ReverseV2:
    """Convert tf ReverseV2 into a chain of Transpose + ReverseSequence nodes,
    one ReverseSequence per reversed axis."""
    @classmethod
    def version_10(cls, ctx, node, **kwargs):
        # T output = ReverseV2(T input, int32|int64 seq_lengths, @int seq_dim, @int batch_dim)
        # Implement tensorflow ReverseV2 op using multiple ReverseSequence (for each axis)
        # and Transpose ops. We sort the axis vector (if non-empty) at the start. Each axis can
        # be reversed only once (in tf) and so we can compute the transpose for each axis
        # (other than 0), feed the tensor to a ReverseSequence node and finally transpose again
        # to get back the original shape.

        axes_node = node.inputs[1]
        axes = axes_node.get_tensor_value(as_list=False)
        # Current support is for when axis is a 1D tensor.
        utils.make_sure(len(axes.shape) == 1,
                        "Currently no support for reverseV2 tensor axis")

        axes = axes.tolist()
        len_axes = len(axes)

        # Store input and output parameters of the ReverseV2 node.
        rv2_in_names = [node.input[0]]

        input_shape = ctx.get_shape(node.input[0])
        # NOTE(review): len(input_shape) runs before the None check two lines
        # below, so an unknown shape raises TypeError instead of the intended
        # make_sure message — the check likely belongs before this line.
        input_rank = len(input_shape)
        input_shape_node = ctx.make_node("Shape", [node.input[0]], op_name_scope=node.name)

        # Make sure input shape is not None
        utils.make_sure(input_shape is not None, "shape of {} is None".format(node.input[0]))

        rv2_node_name = node.name
        # ReverseV2 has a single output.
        rv2_output_dtypes = node.output_dtypes
        rv2_output_shapes = node.output_shapes

        # Remove ReverseV2 node from graph.
        ctx.remove_node(rv2_node_name)

        # Variable to store input names for the next node.
        inputs = rv2_in_names

        new_node = None

        # Empty axis vector.
        if len_axes == 0:
            # Replace ReverseV2 with an identity block.
            ctx.make_node(
                "Identity",
                inputs=inputs,
                outputs=node.output,
                shapes=rv2_output_shapes,
                dtypes=rv2_output_dtypes,
                op_name_scope=rv2_node_name,
            )

        else:
            # For negative indices use the positive counterpart.
            for i, ax in enumerate(axes):
                if ax < 0:
                    axes[i] += input_rank

            axes = sorted(axes)

            orig_perm = list(range(input_rank))
            curr_perm = []

            # Add ReverseSequence nodes for each element of axis.
            for i in range(len_axes):

                axis = axes[i]

                curr_perm = orig_perm.copy()
                # Permutation indices relative to original tensor.
                curr_perm[axis], curr_perm[0] = curr_perm[0], curr_perm[axis]

                # Add a Transpose node if the axis != 0 (finish first due to sort).
                if axis != 0:
                    # Permutation indices for the transpose node relative to IN tensor shape.
                    new_node = ctx.make_node(
                        "Transpose",
                        inputs=inputs,
                        op_name_scope=rv2_node_name,
                        dtypes=rv2_output_dtypes,
                        attr={"perm": curr_perm}
                    )

                    inputs = [new_node.output[0]]

                const_one_name = utils.make_name('const_one')
                const_one = ctx.make_const(name=const_one_name, np_val=np.array([1], dtype=np.int64))
                const_axis_name = utils.make_name(f'const_{axis}')
                const_axis = ctx.make_const(name=const_axis_name, np_val=np.array([axis], dtype=np.int64))

                # Add a Constant node (seq_len) for ReverseSequence.
                # Index 1 for the shape should not return 0, since rank(input) >=2
                input_shape = ctx.make_node("Shape", [inputs[-1]], op_name_scope=rv2_node_name)
                batch_size = ctx.make_node("Gather", [input_shape.output[0], const_one.output[0]],
                                           op_name_scope=rv2_node_name)
                axis_dim = ctx.make_node("Gather", [input_shape_node.output[0], const_axis.output[0]],
                                         op_name_scope=rv2_node_name)
                # seq_lengths = [axis_dim] * batch_size -> reverse the whole axis for every row.
                seq_array = ctx.make_node("Expand", [axis_dim.output[0], batch_size.output[0]])
                inputs.append(seq_array.output[0])

                # Add a ReverseSequence node.

                # If processing for the final axis and the tensor shape permutation is
                # original then the output is fed to the output of the ReverseV2 node.
                #
                # Else a new output is created which is fed to a Transpose node.
                rs_out_name = node.output if \
                    ((i == len_axes - 1) and (curr_perm == orig_perm)) \
                    else None

                rs_out_shapes = None if rs_out_name is None else rv2_output_shapes

                new_node = ctx.make_node(
                    "ReverseSequence",
                    inputs=inputs,
                    op_name_scope=rv2_node_name,
                    outputs=rs_out_name,
                    shapes=rs_out_shapes,
                    dtypes=rv2_output_dtypes,
                    attr={"batch_axis": 1, "time_axis": 0}
                )

                inputs = [new_node.output[0]]

            # Additional transpose block is required if the current
            # permutation list is not the original one.
            if curr_perm != orig_perm:

                # Compute the required permutation list.
                if len_axes != 1:
                    for i, ax in enumerate(axes[::-1][1:]):
                        curr_perm[0], curr_perm[ax] = \
                            curr_perm[ax], curr_perm[0]

                # Add a Transpose node to restore shape.
                ctx.make_node(
                    "Transpose",
                    inputs=inputs,
                    op_name_scope=rv2_node_name,
                    outputs=node.output,
                    shapes=rv2_output_shapes,
                    dtypes=rv2_output_dtypes,
                    attr={"perm": curr_perm}
                )


@tf_op("Unique", onnx_op="Unique")
class Unique:
    """Convert tf Unique to ONNX Unique (sorted=0 to keep tf's first-occurrence
    order); narrow int dtypes are widened to int64 and cast back afterwards."""
    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # opset 11 supports explicitly
        dtypes = node.output_dtypes
        node_name = node.name
        node_inputs = node.input
        node_outputs = node.output
        ctx.remove_node(node_name)
        # ONNX Unique does not accept these narrow int types; run it on int64.
        if dtypes[0] in [TensorProto.INT32, TensorProto.INT16, TensorProto.UINT8, TensorProto.UINT16]:
            inp_cast = ctx.make_node("Cast", [node_inputs[0]], attr={'to': TensorProto.INT64}).output[0]
            node_inputs[0] = inp_cast
        new_node = ctx.make_node("Unique", node_inputs, name=node_name, output_count=3, attr={'sorted': 0})
        # tf output 0 = unique values, tf output 1 = inverse indices (ONNX output 2).
        ctx.replace_all_inputs(node_outputs[0], new_node.output[0])
        ctx.replace_all_inputs(node_outputs[1], new_node.output[2])
        if ctx.get_dtype(new_node.output[0]) != dtypes[0]:
            ctx.insert_new_node_on_output("Cast", new_node.output[0], name=utils.make_name(node.name) + "_cast",
                                          to=dtypes[0])
        if len(node_outputs) > 1:
            # cast to int64 if needed
            if dtypes[1] != onnx_pb.TensorProto.INT64:
                cast_node = ctx.insert_new_node_on_output("Cast", new_node.output[2],
                                                          name=utils.make_name(node.name) + "_cast",
                                                          to=dtypes[1])
                ctx.set_dtype(cast_node.output[0], dtypes[1])
                ctx.copy_shape(new_node.output[2], cast_node.output[0])


@tf_op(["Bincount", "DenseBincount"])
class Bincount:
    """Convert tf (Dense)Bincount via Unique(+counts) and a Scatter into a
    zero-initialized histogram; only empty `weights` are supported."""
    @classmethod
    def any_version(cls, opset, ctx, node, **kwargs):
        # arr, size are int32
        arr_inp, size_inp, weights_inp = node.input
        binary_output = node.get_attr_value("binary_output", False)
        arr_int64 = ctx.make_node("Cast", [arr_inp], attr={'to': TensorProto.INT64}).output[0]
        size_int64 = ctx.make_node("Cast", [size_inp], attr={'to': TensorProto.INT64}).output[0]

        weights_shape = ctx.get_shape(weights_inp)
        # Result dtype follows the weights tensor's dtype (tf semantics for the
        # weighted form; here weights must be empty so only the dtype is used).
        res_dtype = ctx.get_dtype(weights_inp)
        weights_is_zero = weights_shape is not None and 0 in weights_shape
        utils.make_sure(weights_is_zero, "Non-empty weights not yet supported for bincount")

        if ctx.get_rank(arr_inp) == 2:
            # 2-D (DenseBincount per-row) case: count unique (row, value) pairs
            # so each row gets its own histogram.
            zero_const = ctx.make_const(utils.make_name("zero_const"), np.array(0, np.int64)).output[0]
            one_const = ctx.make_const(utils.make_name("one_const"), np.array(1, np.int64)).output[0]
            inp_shape = ctx.make_node("Shape", [arr_inp]).output[0]
            num_rows = GraphBuilder(ctx).make_slice({"data": inp_shape, "starts": [0], "ends": [1], "axes": [0]})
            num_rows_sq = GraphBuilder(ctx).make_squeeze({"data": num_rows, "axes": [0]})
            row_idx = ctx.make_node("Range", [zero_const, num_rows_sq, one_const]).output[0]
            row_idx_unsq = GraphBuilder(ctx).make_unsqueeze({"data": row_idx, "axes": [1]})
            row_idx_expand = ctx.make_node("Expand", [row_idx_unsq, inp_shape]).output[0]
            arr_int64_unsq = GraphBuilder(ctx).make_unsqueeze({"data": arr_int64, "axes": [2]})
            row_idx_expand_unsq = GraphBuilder(ctx).make_unsqueeze({"data": row_idx_expand, "axes": [2]})
            # Pair every element with its row index: shape [rows, cols, 2] -> [-1, 2].
            concat = ctx.make_node("Concat", [row_idx_expand_unsq, arr_int64_unsq], {"axis": 2}).output[0]
            reshape_const = ctx.make_const(utils.make_name("reshape_const"), np.array([-1, 2], np.int64)).output[0]
            reshaped = ctx.make_node("Reshape", [concat, reshape_const]).output[0]
            values, _, _, counts = ctx.make_node("Unique", [reshaped], attr={'sorted': 1, 'axis': 0}, output_count=4,
                                                 op_name_scope=node.name).output
            # Bounds-check only the value column of each (row, value) pair.
            values_to_check_unsq = GraphBuilder(ctx).make_slice(
                {"data": values, "starts": [1], "ends": [2], "axes": [1]})
            values_to_check = GraphBuilder(ctx).make_squeeze({"data": values_to_check_unsq, "axes": [1]})
            size_unsq = GraphBuilder(ctx).make_unsqueeze({'data': size_int64, "axes": [0]})
            output_shape = ctx.make_node("Concat", [num_rows, size_unsq], attr={"axis": 0}).output[0]
        else:
            values, _, _, counts = ctx.make_node("Unique", [arr_int64], attr={'sorted': 1}, output_count=4,
                                                 op_name_scope=node.name).output
            values_to_check = values
            output_shape = GraphBuilder(ctx).make_unsqueeze({'data': size_int64, "axes": [0]})

        # tf bincount drops values outside [0, size); build that mask.
        neg_one_const = ctx.make_const(utils.make_name("neg_one_const"), np.array(-1, np.int64)).output[0]
        non_neg_val_locs = ctx.make_node("Greater", [values_to_check, neg_one_const]).output[0]
        small_val_locs = ctx.make_node("Less", [values_to_check, size_int64]).output[0]
        valid_val_locs = ctx.make_node("And", [non_neg_val_locs, small_val_locs]).output[0]

        valid_values = ctx.make_node("Compress", [values, valid_val_locs], attr={'axis': 0}).output[0]
        if binary_output:
            # binary_output=True: presence flags (all ones) instead of counts.
            counts_shape = ctx.make_node("Shape", [valid_values]).output[0]
            counts_shape_1d = GraphBuilder(ctx).make_slice(
                {"data": counts_shape, "starts": [0], "ends": [1], "axes": [0]})
            ones_tensor = helper.make_tensor("value", TensorProto.INT64, dims=[1], vals=[1])
            valid_counts = ctx.make_node("ConstantOfShape", [counts_shape_1d], attr={'value': ones_tensor}).output[0]
        else:
            valid_counts = ctx.make_node("Compress", [counts, valid_val_locs], attr={'axis': 0}).output[0]

        zero_tensor = helper.make_tensor("value", TensorProto.INT64, dims=[1], vals=[0])
        zeros = ctx.make_node("ConstantOfShape", [output_shape], attr={'value': zero_tensor}).output[0]

        # 2-D indices need ScatterND; 1-D can use ScatterElements.
        if ctx.get_rank(arr_inp) == 2:
            result = ctx.make_node("ScatterND", [zeros, valid_values, valid_counts]).output[0]
        else:
            result = ctx.make_node("ScatterElements", [zeros, valid_values, valid_counts], attr={'axis': 0}).output[0]
        result_cast = result
        if res_dtype != TensorProto.INT64:
            result_cast = ctx.make_node("Cast", [result], attr={'to': res_dtype}).output[0]

        ctx.replace_all_inputs(node.output[0], result_cast)
        ctx.remove_node(node.name)

    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        cls.any_version(11, ctx, node, **kwargs)

    @classmethod
    def version_13(cls, ctx, node, **kwargs):
        # Parameters moved to inputs for operator Squeeze, Unsqueeze.
        cls.any_version(13, ctx, node, **kwargs)


@tf_op("SparseToDense")
class SparseToDense:
    """Convert tf SparseToDense: Expand the default value to the dense shape,
    then ScatterND the sparse values into it."""
    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        sparse_indices, out_shape, sparse_vals, default_val = node.input
        idx_shape = ctx.get_shape(sparse_indices)
        val_shape = ctx.get_shape(sparse_vals)
        val_is_scalar = val_shape is not None and val_shape[0] == 1
        idx_is_scalar = idx_shape is not None and idx_shape[0] == 1
        # A single broadcast value for many indices is not implemented.
        utils.make_sure(not val_is_scalar or idx_is_scalar, "SparseToDense not implemented yet for scalar values")

        expand_node = ctx.make_node("Expand", [default_val, out_shape])
        node.type = "ScatterND"
        ctx.replace_inputs(node, [expand_node.output[0], sparse_indices, sparse_vals])


def ragged_lengths_to_sparse_indices(ctx, ragged_lens):
    """Build graph nodes turning per-row lengths into sparse (row, col) indices.

    Given a 1-D int64 tensor of row lengths, returns output names for
    (num_rows, num_cols, row_indices, col_indices) where row/col_indices
    enumerate every valid element of the implied ragged matrix in row-major
    order and num_cols is the maximum row length.
    """
    const_zero_int64 = ctx.make_const(utils.make_name("const_zero"), np.array(0, dtype=np.int64)).output[0]
    # NOTE(review): 'keeepdims' is a typo for 'keepdims'; the attribute is
    # silently dropped, so ONNX's default keepdims=1 applies — which matches
    # the intended True, but the spelling should be fixed.
    num_cols = ctx.make_node("ReduceMax", [ragged_lens], attr={'axes': [0], 'keeepdims': True}).output[0]
    num_rows = ctx.make_node("Shape", [ragged_lens]).output[0]
    range_len = ctx.make_node("Mul", [num_cols, num_rows]).output[0]

    # ORT seems to have a shape inference bug for the Range node. Use CumSum instead.
    one_tensor = helper.make_tensor("value", TensorProto.INT64, dims=[1], vals=[1])
    ones_of_shape = ctx.make_node("ConstantOfShape", [range_len], attr={"value": one_tensor}).output[0]
    range_node = ctx.make_node("CumSum", [ones_of_shape, const_zero_int64], attr={'exclusive': True}).output[0]
    #const_one_int64 = ctx.make_const(utils.make_name("const_one"), np.array(1, dtype=np.int64)).output[0]
    #range_node = ctx.make_node("Range", [const_zero_int64, range_len, const_one_int64]).output[0]

    # Decompose the flat 0..rows*cols range into dense (row, col) pairs, then
    # keep only positions that fall inside each row's actual length.
    col_indices_dense = ctx.make_node("Mod", [range_node, num_cols]).output[0]
    row_indices_dense = ctx.make_node("Div", [range_node, num_cols]).output[0]
    row_lens_dense = ctx.make_node("Gather", [ragged_lens, row_indices_dense]).output[0]
    indices_to_keep = ctx.make_node("Less", [col_indices_dense, row_lens_dense]).output[0]
    col_indices = ctx.make_node("Compress", [col_indices_dense, indices_to_keep]).output[0]
    row_indices = ctx.make_node("Compress", [row_indices_dense, indices_to_keep]).output[0]
    return num_rows, num_cols, row_indices, col_indices


def ragged_nested_splits_to_sparse_indices(ctx, nested_splits, op_name_scope):
    """Fold a list of ROW_SPLITS tensors into sparse indices + dense shape.

    Each splits tensor refines the previous level: its lengths are derived by
    differencing adjacent split offsets, converted to per-level (row, col)
    indices, and the row side is mapped through the indices accumulated so far.
    Returns (sparse_indices [N, depth+1], dense_shape [depth+1]).
    """
    sparse_indices = None
    dense_shape_dims = []
    for split in nested_splits:
        if ctx.get_dtype(split) != TensorProto.INT64:
            split = ctx.make_node("Cast", [split], attr={'to': TensorProto.INT64}).output[0]
        max_int64 = int(utils.get_max_value(np.int64))
        # lengths[i] = splits[i+1] - splits[i]
        slice1 = GraphBuilder(ctx).make_slice(
            {"data": split, "ends": [max_int64], "starts": [1], "axes": [0]})
        slice2 = GraphBuilder(ctx).make_slice(
            {"data": split, "ends": [-1], "starts": [0], "axes": [0]})
        ragged_lens = ctx.make_node("Sub", [slice1, slice2]).output[0]
        num_rows, num_cols, row_indices, col_indices = ragged_lengths_to_sparse_indices(ctx, ragged_lens)
        # Outermost level also contributes the leading dense dimension.
        if not dense_shape_dims:
            dense_shape_dims.append(num_rows)
        dense_shape_dims.append(num_cols)
        if sparse_indices is None:
            row_indices = GraphBuilder(ctx).make_unsqueeze({"data": row_indices, "axes": [1]})
        else:
            # Look up this level's rows in the previously accumulated index tuples.
            row_indices = ctx.make_node("Gather", [sparse_indices, row_indices]).output[0]
        col_indices = GraphBuilder(ctx).make_unsqueeze({"data": col_indices, "axes": [1]})
        sparse_indices = ctx.make_node("Concat", [row_indices, col_indices], attr={'axis': 1},
                                       op_name_scope=op_name_scope).output[0]
    dense_shape = ctx.make_node("Concat", dense_shape_dims, attr={'axis': 0}, op_name_scope=op_name_scope).output[0]
    return sparse_indices, dense_shape


@tf_op("RaggedTensorToSparse")
class RaggedTensorToSparse:
    """tf RaggedTensorToSparse: flat values pass through unchanged; indices and
    dense shape come from the nested-splits helper."""
    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # https://www.tensorflow.org/guide/ragged_tensor#multiple_ragged_dimensions
        dense_values = node.input[-1]
        nested_splits = node.input[:-1]
        sparse_indices, dense_shape = ragged_nested_splits_to_sparse_indices(ctx, nested_splits, node.name)
        ctx.replace_all_inputs(node.output[0], sparse_indices)
        ctx.replace_all_inputs(node.output[1], dense_values)
        ctx.replace_all_inputs(node.output[2], dense_shape)
        ctx.remove_node(node.name)


@tf_op("RaggedTensorToTensor")
class RaggedTensorToTensor:
    """tf RaggedTensorToTensor (ROW_SPLITS partitions only): sparse-ify the
    ragged input, drop out-of-bounds entries when an explicit shape truncates,
    then ScatterND onto an Expand-ed default-value canvas."""
    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        shape, values, default_value, *row_partition_tensors = node.input
        partition_types = node.get_attr_value("row_partition_types")
        error_msg = "Only ROW_SPLITS partition type is supported for RaggedTensorToTensor. types: %r"
        utils.make_sure(all(t == b'ROW_SPLITS' for t in partition_types), error_msg, partition_types)
        nested_splits = row_partition_tensors
        sparse_indices, dense_shape = ragged_nested_splits_to_sparse_indices(ctx, nested_splits, node.name)
        # A shape of rank 0 means the natural shape should be used.
        if ctx.get_rank(shape) != 0:
            if ctx.get_dtype(shape) != TensorProto.INT64:
                shape = ctx.make_node("Cast", [shape], attr={'to': TensorProto.INT64}).output[0]
            const_zero_int64 = ctx.make_const(utils.make_name("const_zero"), np.array(0, dtype=np.int64)).output[0]
            # Negative dims in the requested shape mean "use the natural size".
            unspec_dims = ctx.make_node("Less", [shape, const_zero_int64]).output[0]
            out_shape = ctx.make_node("Where", [unspec_dims, dense_shape, shape]).output[0]
            # Keep only indices strictly inside the (possibly smaller) out_shape:
            # min over axes of (out_shape - index) must be positive.
            out_shape_unsq = GraphBuilder(ctx).make_unsqueeze({'data': out_shape, 'axes': [0]})
            amt_idx_in_bounds = ctx.make_node("Sub", [out_shape_unsq, sparse_indices]).output[0]
            amt_in_bounds_flat = ctx.make_node("ReduceMin", [amt_idx_in_bounds], attr={'axes': [1], 'keepdims': False})
            idx_in_bounds = ctx.make_node("Greater", [amt_in_bounds_flat.output[0], const_zero_int64]).output[0]
            sparse_indices = ctx.make_node("Compress", [sparse_indices, idx_in_bounds], attr={'axis': 0}).output[0]
            values = ctx.make_node("Compress", [values, idx_in_bounds], attr={'axis': 0}).output[0]
        else:
            out_shape = dense_shape
        expand_node = ctx.make_node("Expand", [default_value, out_shape])
        node.type = "ScatterND"
        ctx.replace_inputs(node, [expand_node.output[0], sparse_indices, values])


@tf_op("RaggedRange")
class RaggedRange:
    """Convert tf RaggedRange: compute each range's length with a
    ceil-division (float or integer flavor), then materialize the values from
    sparse (row, col) indices as starts[row] + col * deltas[row]."""
    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        starts, limits, deltas = node.input
        data_dtype = ctx.get_dtype(starts)
        data_np_dtype = utils.map_onnx_to_numpy_type(data_dtype)
        data_is_float = np.dtype(data_np_dtype).kind == 'f'

        if data_is_float:
            # row_lens = ceil((limits - starts) / deltas)
            sub_node = ctx.make_node("Sub", [limits, starts]).output[0]
            div_node = ctx.make_node("Div", [sub_node, deltas]).output[0]
            ceil_node = ctx.make_node("Ceil", [div_node]).output[0]
            row_lens = ctx.make_node("Cast", [ceil_node], attr={'to': TensorProto.INT64}).output[0]

        else:
            # compute ceil(a/b) with ints
            starts_cast = ctx.make_node("Cast", [starts], attr={'to': TensorProto.INT64}).output[0]
            limits_cast = ctx.make_node("Cast", [limits], attr={'to': TensorProto.INT64}).output[0]
            deltas_cast = ctx.make_node("Cast", [deltas], attr={'to': TensorProto.INT64}).output[0]
            sub_node = ctx.make_node("Sub", [limits_cast, starts_cast]).output[0]
            div_node = ctx.make_node("Div", [sub_node, deltas_cast]).output[0]
            mul_node = ctx.make_node("Mul", [div_node, deltas_cast]).output[0]
            eq_node = ctx.make_node("Equal", [mul_node, sub_node]).output[0]
            ne_node = ctx.make_node("Not", [eq_node]).output[0]
            # we want to round up if it isn't evenly divisible
            offset = ctx.make_node("Cast", [ne_node], attr={'to': TensorProto.INT64}).output[0]
            row_lens = ctx.make_node("Add", [div_node, offset]).output[0]

        # Clamp negative lengths (limit already passed) to zero. Before opset 12
        # Max has no int64 support, so round-trip through double.
        const_zero_int64 = ctx.make_const(utils.make_name("const_zero"), np.array(0, dtype=np.int64)).output[0]
        if ctx.opset <= 11:
            const_zero_double = ctx.make_const(utils.make_name("const_zero"), np.array(0, dtype=np.float64)).output[0]
            row_lens = ctx.make_node("Cast", [row_lens], attr={'to': TensorProto.DOUBLE}).output[0]
            row_lens = ctx.make_node("Max", [row_lens, const_zero_double]).output[0]
            row_lens = ctx.make_node("Cast", [row_lens], attr={'to': TensorProto.INT64}).output[0]
        else:
            row_lens = ctx.make_node("Max", [row_lens, const_zero_int64]).output[0]

        const_zero_list = ctx.make_const(utils.make_name("const_zero_list"), np.array([0], dtype=np.int64)).output[0]

        num_rows, _, row_indices, col_indices = ragged_lengths_to_sparse_indices(ctx, row_lens)

        # nested_splits output = [0, cumsum(row_lens)].
        split_ends = ctx.make_node("CumSum", [row_lens, const_zero_int64]).output[0]
        splits_out = ctx.make_node("Concat", [const_zero_list, split_ends], attr={'axis': 0}).output[0]
        col_indices_cast = ctx.make_node("Cast", [col_indices], attr={'to': data_dtype}).output[0]

        # Broadcast scalar starts/deltas across all rows.
        if ctx.get_rank(starts) != 1:
            starts = ctx.make_node("Expand", [starts, num_rows]).output[0]

        if ctx.get_rank(deltas) != 1:
            deltas = ctx.make_node("Expand", [deltas, num_rows]).output[0]

        gather_starts = ctx.make_node("Gather", [starts, row_indices]).output[0]
        gather_deltas =
ctx.make_node("Gather", [deltas, row_indices]).output[0] + + mul_node = ctx.make_node("Mul", [col_indices_cast, gather_deltas], op_name_scope=node.name).output[0] + dense_vals_out = ctx.make_node("Add", [gather_starts, mul_node], op_name_scope=node.name).output[0] + + ctx.replace_all_inputs(node.output[0], splits_out) + ctx.replace_all_inputs(node.output[1], dense_vals_out) + ctx.remove_node(node.name) + + +@tf_op("RaggedGather") +class RaggedGather: + @classmethod + def version_11(cls, ctx, node, **kwargs): + *params_nested_splits, params_dense_values, indices = node.input + inp_ragged_rank = node.get_attr_value("PARAMS_RAGGED_RANK") + out_ragged_rank = node.get_attr_value("OUTPUT_RAGGED_RANK") + err_msg = "RaggedGather conversion only supports ragged rank of 1" + utils.make_sure(inp_ragged_rank == 1 and out_ragged_rank == 1 and len(params_nested_splits) == 1, err_msg) + splits = params_nested_splits[0] + err_msg2 = "RaggedGather conversion only supports tensors with no dense dimensions" + utils.make_sure(ctx.get_rank(splits) in [None, 1] and ctx.get_rank(params_dense_values) in [None, 1], err_msg2) + splits_dtype = ctx.get_dtype(splits) + + if splits_dtype != TensorProto.INT64: + splits_64 = ctx.make_node("Cast", [splits], attr={'to': TensorProto.INT64}).output[0] + else: + splits_64 = splits + + max_int64 = int(utils.get_max_value(np.int64)) + slice1 = GraphBuilder(ctx).make_slice( + {"data": splits_64, "ends": [max_int64], "starts": [1], "axes": [0]}) + slice2 = GraphBuilder(ctx).make_slice( + {"data": splits_64, "ends": [-1], "starts": [0], "axes": [0]}) + ragged_lens = ctx.make_node("Sub", [slice1, slice2]).output[0] + + gathered_lens = ctx.make_node("Gather", [ragged_lens, indices], op_name_scope=node.name).output[0] + + const_zero_unsq = ctx.make_const(utils.make_name("const_zero"), np.array([0], dtype=np.int64)).output[0] + const_one_unsq = ctx.make_const(utils.make_name("const_one"), np.array([1], dtype=np.int64)).output[0] + gathered_lens_w_zero = 
ctx.make_node("Concat", [const_zero_unsq, gathered_lens], attr={'axis': 0}).output[0] + + const_zero_int64 = ctx.make_const(utils.make_name("const_zero"), np.array(0, dtype=np.int64)).output[0] + const_one_int64 = ctx.make_const(utils.make_name("const_one"), np.array(1, dtype=np.int64)).output[0] + + gathered_splits = ctx.make_node("CumSum", [gathered_lens_w_zero, const_zero_int64]).output[0] + if splits_dtype != TensorProto.INT64: + output_splits = ctx.make_node("Cast", [gathered_splits], attr={'to': splits_dtype}).output[0] + else: + output_splits = gathered_splits + + # Now that we have the splits, we just need to make the list of values. + total_length = GraphBuilder(ctx).make_slice( + {"data": gathered_splits, "ends": [max_int64], "starts": [-1], "axes": [0]}) + gathered_starts = ctx.make_node("Gather", [splits_64, indices], op_name_scope=node.name).output[0] + # We disregard any length 0 segments + non_zero_pos = ctx.make_node("Greater", [gathered_lens, const_zero_int64]).output[0] + non_zero_lens = ctx.make_node("Compress", [gathered_lens, non_zero_pos]).output[0] + non_zero_lens_shifted = ctx.make_node("Concat", [const_zero_unsq, non_zero_lens], attr={'axis': 0}).output[0] + non_zero_prev_lens = GraphBuilder(ctx).make_slice( + {"data": non_zero_lens_shifted, "ends": [-1], "starts": [0], "axes": [0]}) + non_zero_starts = ctx.make_node("Compress", [gathered_starts, non_zero_pos]).output[0] + non_zero_splits = ctx.make_node("Compress", [gathered_splits, non_zero_pos]).output[0] + + prev_starts = GraphBuilder(ctx).make_slice( + {"data": non_zero_starts, "ends": [-1], "starts": [0], "axes": [0]}) + prev_starts_concat = ctx.make_node("Concat", [const_one_unsq, prev_starts], attr={'axis': 0}).output[0] + deltas = ctx.make_node("Sub", [non_zero_starts, prev_starts_concat]).output[0] + deltas2 = ctx.make_node("Sub", [deltas, non_zero_prev_lens]).output[0] + deltas3 = ctx.make_node("Add", [deltas2, const_one_int64]).output[0] + one_tensor = 
helper.make_tensor("value", TensorProto.INT64, dims=[1], vals=[1]) + ones_of_shape = ctx.make_node("ConstantOfShape", [total_length], attr={"value": one_tensor}).output[0] + full_deltas = ctx.make_node("ScatterElements", [ones_of_shape, non_zero_splits, deltas3], attr={'axis': 0}) + full_indices = ctx.make_node("CumSum", [full_deltas.output[0], const_zero_int64]).output[0] + output_values = ctx.make_node("Gather", [params_dense_values, full_indices], op_name_scope=node.name).output[0] + + ctx.replace_all_inputs(node.output[0], output_splits) + ctx.replace_all_inputs(node.output[1], output_values) + ctx.remove_node(node.name) + + +@tf_op("SparseReshape") +class SparseReshape: + @classmethod + def any_version(cls, opset, ctx, node, **kwargs): + indices_inp, shape_inp, new_shape_inp = node.input + + product_curr_dims = ctx.make_node("ReduceProd", [shape_inp], attr={'axes': [0], 'keepdims': 1}).output[0] + product_new_dims = ctx.make_node("ReduceProd", [new_shape_inp], attr={'axes': [0], 'keepdims': 1}).output[0] + neg_missing_dims = ctx.make_node("Div", [product_curr_dims, product_new_dims]).output[0] + pos_missing_dims = ctx.make_node("Neg", [neg_missing_dims]).output[0] + zero_const = ctx.make_const(utils.make_name("cosnt_zero"), np.array(0, dtype=np.int64)).output[0] + one_const = ctx.make_const(utils.make_name("cosnt_one"), np.array(1, dtype=np.int64)).output[0] + unknown_dim_loc = ctx.make_node("Less", [new_shape_inp, zero_const]).output[0] + + new_shape = ctx.make_node("Where", [unknown_dim_loc, pos_missing_dims, new_shape_inp]).output[0] + + zero_tensor = helper.make_tensor("value", TensorProto.INT64, dims=[1], vals=[0]) + + def cum_prod_of_vector(vector): + shape = ctx.get_shape(vector) + rank = shape[0] if shape is not None else -1 + if rank != -1: + lower_tri = np.tri(rank, rank, dtype=np.bool) + lower_triangular_bool = ctx.make_const(utils.make_name("lower_tri_const"), lower_tri).output[0] + else: + rank = ctx.make_node("Shape", [vector]).output[0] + 
rank_sq = ctx.make_node("Concat", [rank, rank], attr={'axis': 0}).output[0] + square_of_rank = ctx.make_node("ConstantOfShape", [rank_sq], attr={'value': zero_tensor}).output[0] + identity_matrix = ctx.make_node("EyeLike", [square_of_rank]).output[0] + lower_triangular = ctx.make_node("CumSum", [identity_matrix, zero_const]).output[0] + lower_triangular_bool = ctx.make_node("Cast", [lower_triangular], + attr={'to': TensorProto.BOOL}).output[0] + terms = ctx.make_node("Where", [lower_triangular_bool, one_const, vector]).output[0] + return ctx.make_node("ReduceProd", [terms], attr={'axes': [1], 'keepdims': 0}).output[0] + + cum_prod_curr_shape = cum_prod_of_vector(shape_inp) + cum_prod_new_shape = cum_prod_of_vector(new_shape) + cum_prod_new_concat = ctx.make_node("Concat", [product_curr_dims, cum_prod_new_shape], + attr={'axis': 0}).output[0] + pads = ctx.make_const(utils.make_name("pad_const"), np.array([0, -1], dtype=np.int64)).output[0] + cum_prod_new_inc = ctx.make_node("Pad", [cum_prod_new_concat, pads]).output[0] + + flat_indices = ctx.make_node("MatMul", [indices_inp, cum_prod_curr_shape]).output[0] + indices_unsqueeze = GraphBuilder(ctx).make_unsqueeze({'data': flat_indices, "axes": [1]}) + mod_indices = ctx.make_node("Mod", [indices_unsqueeze, cum_prod_new_inc], op_name_scope=node.name).output[0] + new_indices = ctx.make_node("Div", [mod_indices, cum_prod_new_shape], op_name_scope=node.name).output[0] + + ctx.replace_all_inputs(node.output[0], new_indices) + ctx.replace_all_inputs(node.output[1], new_shape) + ctx.remove_node(node.name) + + @classmethod + def version_11(cls, ctx, node, **kwargs): + cls.any_version(11, ctx, node, **kwargs) + + @classmethod + def version_13(cls, ctx, node, **kwargs): + # Parameters moved to inputs for operator Squeeze, Unsqueeze. 
        cls.any_version(13, ctx, node, **kwargs)


@tf_op("SparseFillEmptyRows")
class SparseFillEmptyRows:
    @classmethod
    def any_version(cls, opset, ctx, node, **kwargs):
        """Convert tf.sparse.fill_empty_rows: append a default-valued entry for each empty row.

        The reverse_index_map output (output[3]) is not implemented; conversion
        fails if anything consumes it.
        """
        sparse_indices, sparse_vals, dense_shape, default_val = node.input
        utils.make_sure(len(ctx.find_output_consumers(node.output[3])) == 0,
                        "reverse_index_map output of SparseFillEmptyRows not implemented")
        # Row coordinate (axis-0 index) of every existing sparse entry.
        axis_0_indices = GraphBuilder(ctx).make_slice({"data": sparse_indices, "ends": [1], "starts": [0], "axes": [1]})
        unique_indices = ctx.make_node("Unique", [axis_0_indices], op_name_scope=node.name).output[0]
        axis_0_len = GraphBuilder(ctx).make_slice({"data": dense_shape, "ends": [1], "starts": [0], "axes": [0]})

        # indicators[r] == True iff row r is empty: start all-True, scatter False
        # at every row that has at least one entry.
        true_tensor = helper.make_tensor("value", TensorProto.BOOL, dims=[1], vals=[True])
        true_of_shape = ctx.make_node("ConstantOfShape", inputs=[axis_0_len], attr={"value": true_tensor},
                                      op_name_scope=node.name).output[0]
        unique_shape = ctx.make_node("Shape", [unique_indices], op_name_scope=node.name).output[0]
        false_tensor = helper.make_tensor("value", TensorProto.BOOL, dims=[1], vals=[False])
        false_of_shape = ctx.make_node("ConstantOfShape", inputs=[unique_shape], attr={"value": false_tensor},
                                       op_name_scope=node.name).output[0]

        indicators = ctx.make_node("ScatterElements", [true_of_shape, unique_indices, false_of_shape],
                                   op_name_scope=node.name).output[0]
        zero_const = ctx.make_const(utils.make_name("zero_const"), np.array(0, dtype=np.int64)).output[0]
        one_const = ctx.make_const(utils.make_name("one_const"), np.array(1, dtype=np.int64)).output[0]

        # Row numbers of the empty rows: compress [0, axis_0_len) by the indicator mask.
        scalar_len = GraphBuilder(ctx).make_squeeze({'data': axis_0_len, "axes": [0]}, op_name_scope=node.name)
        idx_range = ctx.make_node("Range", [zero_const, scalar_len, one_const], op_name_scope=node.name).output[0]
        new_indices = ctx.make_node("Compress", [idx_range, indicators], op_name_scope=node.name).output[0]
        new_indices_unsqueeze = GraphBuilder(ctx).make_unsqueeze(
            {'data': new_indices, 'axes': [1]},
            op_name_scope=node.name)
        num_empty_rows = ctx.make_node("Shape", [new_indices], op_name_scope=node.name).output[0]
        new_values = ctx.make_node("Expand", [default_val, num_empty_rows], op_name_scope=node.name).output[0]
        # Pad each new row index with zeros on the right so it has the same
        # number of coordinates as the original indices ([row, 0, 0, ...]).
        indices_shape = ctx.make_node("Shape", [sparse_indices], op_name_scope=node.name).output[0]
        idx_shape = GraphBuilder(ctx).make_slice({"data": indices_shape, "ends": [2], "starts": [1], "axes": [0]})
        idx_shape_min_1 = ctx.make_node("Sub", [idx_shape, one_const], op_name_scope=node.name).output[0]

        triple_0 = ctx.make_const(utils.make_name("triple_0"), np.array([0, 0, 0], dtype=np.int64)).output[0]
        new_indices_pads = ctx.make_node("Concat", [triple_0, idx_shape_min_1], attr={"axis": 0},
                                         op_name_scope=node.name).output[0]
        new_indices_2d = ctx.make_node("Pad", [new_indices_unsqueeze, new_indices_pads],
                                       op_name_scope=node.name).output[0]

        combined_indices = ctx.make_node("Concat", [sparse_indices, new_indices_2d], attr={"axis": 0},
                                         op_name_scope=node.name).output[0]
        combined_vals = ctx.make_node("Concat", [sparse_vals, new_values], attr={"axis": 0},
                                      op_name_scope=node.name).output[0]

        # The indices will not be sorted (violates a TF requirement), but conversions for subsequent ops
        # (like SparseToDense) don't care and will work fine. Add a TopK to sort in the future if needed.
        ctx.replace_all_inputs(node.output[0], combined_indices)
        ctx.replace_all_inputs(node.output[1], combined_vals)
        ctx.replace_all_inputs(node.output[2], indicators)

        ctx.remove_node(node.name)

    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        cls.any_version(11, ctx, node, **kwargs)

    @classmethod
    def version_13(cls, ctx, node, **kwargs):
        # Parameters moved to inputs for operator Squeeze, Unsqueeze.
        cls.any_version(13, ctx, node, **kwargs)


@tf_op("DynamicPartition")
class DynamicPartition:
    @classmethod
    def any_version(cls, opset, ctx, node, **kwargs):
        """Convert tf.dynamic_partition: one-hot the partition ids, then Compress data per partition."""
        # For desired behavior, see diagram: https://www.tensorflow.org/api_docs/python/tf/raw_ops/DynamicPartition
        data_inp = node.input[0]
        partition_inp = node.input[1]
        partition_shape = ctx.get_shape(partition_inp)
        num_partitions = node.get_attr_value('num_partitions')
        utils.make_sure(partition_shape is not None, "DynamicPartition requires known rank")
        utils.make_sure(len(partition_shape) == 1, "DynamicPartition only implemented for partitions of rank 1")
        # Put partitions into OneHot format
        range_val = np.arange(num_partitions, dtype=np.int32).reshape([num_partitions, 1])
        range_const = ctx.make_const(utils.make_name('range_const'), range_val)
        # Broadcasted Equal yields a [num_partitions, n] membership mask.
        equal_node = ctx.make_node("Equal", [partition_inp, range_const.output[0]])
        # Cast bool to int since ORT doesn't implement Split on bool.
        equal_int32 = ctx.make_node("Cast", [equal_node.output[0]], attr={"to": TensorProto.INT32})
        split_node = ctx.make_node("Split", [equal_int32.output[0]], output_count=num_partitions, attr={'axis': 0})
        for i in range(num_partitions):
            # Row i of the mask selects the elements belonging to partition i.
            cond_bools = ctx.make_node("Cast", [split_node.output[i]], attr={"to": TensorProto.BOOL})
            squeeze_node = GraphBuilder(ctx).make_squeeze({'data': cond_bools.output[0], "axes": [0]}, return_node=True)
            compress_node = ctx.make_node("Compress", [data_inp, squeeze_node.output[0]], attr={'axis': 0})
            ctx.replace_all_inputs(node.output[i], compress_node.output[0])
            ctx.copy_dtype(node.output[i], compress_node.output[0])
            ctx.copy_shape(node.output[i], compress_node.output[0])
        ctx.remove_node(node.name)

    @classmethod
    def version_9(cls, ctx, node, **kwargs):
        cls.any_version(9, ctx, node, **kwargs)

    @classmethod
    def version_13(cls, ctx, node, **kwargs):
        # Parameters moved to inputs for operator Squeeze, Unsqueeze.
        cls.any_version(13, ctx, node, **kwargs)


@tf_op(["DynamicStitch", "ParallelDynamicStitch"])
class DynamicStitch:
    @classmethod
    def any_version(cls, opset, ctx, node, **kwargs):
        """Convert tf.dynamic_stitch: concat all pieces, then scatter by index into a zero tensor."""
        # First half of the inputs are the index tensors, second half the data tensors.
        num_partitions = len(node.input) // 2
        index_inputs = node.input[:num_partitions]
        data_inputs = node.input[num_partitions:]
        index_shapes = [ctx.get_shape(inp) for inp in index_inputs]
        data_shapes = [ctx.get_shape(inp) for inp in data_inputs]
        utils.make_sure(all(s is not None and len(s) == 1 for s in index_shapes),
                        "DynamicStitch only implemented for index tensors of rank 1")
        utils.make_sure(all(s is not None for s in data_shapes), "DynamicStitch requires data tensors of known rank")
        data_rank = len(data_shapes[0])
        dtype = ctx.get_dtype(node.output[0])
        concat_indices = ctx.make_node("Concat", index_inputs, attr={'axis': 0})
        concat_indices_int64 = ctx.make_node("Cast", [concat_indices.output[0]], attr={"to": TensorProto.INT64})

        concat_data = ctx.make_node("Concat", data_inputs, attr={'axis': 0})

        data_shape = ctx.make_node("Shape", [concat_data.output[0]])
        unsqueezed_indices = concat_indices_int64
        if data_rank > 1:
            # ScatterElements needs indices with the same rank as data: broadcast
            # each row index across the trailing data dimensions.
            unsqueeze_axes = list(range(1, data_rank))
            unsqueezed_indices = GraphBuilder(ctx).make_unsqueeze(
                {'data': concat_indices_int64.output[0], "axes": unsqueeze_axes}, return_node=True)
        expanded_indices = ctx.make_node("Expand", [unsqueezed_indices.output[0], data_shape.output[0]])

        zero_tensor = helper.make_tensor("value", dtype, dims=[1], vals=[0])
        zeros_of_shape = ctx.make_node("ConstantOfShape", [data_shape.output[0]], attr={"value": zero_tensor})

        # Replace the TF node in place, keeping its name and output names.
        name = node.name
        outputs = node.output
        ctx.remove_node(node.name)
        ctx.make_node("ScatterElements",
                      [zeros_of_shape.output[0], expanded_indices.output[0], concat_data.output[0]],
                      name=name,
                      outputs=outputs,
                      attr={'axis': 0})

    @classmethod
    def version_10(cls, ctx, node, **kwargs):
        cls.any_version(10, ctx, node, **kwargs)

    @classmethod
    def version_13(cls,
ctx, node, **kwargs): + # Parameters moved to inputs for operator Squeeze, Unsqueeze. + cls.any_version(13, ctx, node, **kwargs) + + +@tf_op("MatrixDiagPart") +class MatrixDiagPart: + @classmethod + def any_version(cls, opset, ctx, node, **kwargs): + # MatrixDiagPart by slice and gather + minus_two_one, minus_two, minus_one, zeo, zeo_zeo, one, two, two_one = \ + [n.output[0] for n in ctx.make_consts([[-2, -1], [-2], [-1], [0], [0, 0], [1], [2], [2, 1]])] + zeo_, one_ = [n.output[0] for n in ctx.make_consts([0, 1])] + + input_shape = ctx.make_node('Shape', [node.input[0]]) + input_shape_size = ctx.make_node('Shape', [input_shape.output[0]]) + matrice_shape = ctx.make_node('Slice', + [input_shape.output[0], minus_two, input_shape_size.output[0]]) + matrice_shape_float = ctx.make_node('Cast', [matrice_shape.output[0]], attr={'to': TensorProto.FLOAT}) + matrice_shape_float_x = ctx.make_node('Slice', [matrice_shape_float.output[0], zeo, one]) + matrice_shape_float_y = ctx.make_node('Slice', + [matrice_shape_float.output[0], one, two]) + min_matrice_dim_float = ctx.make_node('Min', [matrice_shape_float_x.output[0], matrice_shape_float_y.output[0]]) + min_matrice_dim = ctx.make_node('Cast', [min_matrice_dim_float.output[0]], attr={'to': TensorProto.INT64}) + double_matrice_dim = ctx.make_node('Concat', [min_matrice_dim.output[0], min_matrice_dim.output[0]], + attr={'axis': -1}) + sliced_input = ctx.make_node('Slice', [node.input[0], zeo_zeo, double_matrice_dim.output[0], two_one]) + sliced_input_shape = ctx.make_node('Shape', [sliced_input.output[0]]) + sliced_input_shape_half = ctx.make_node('Slice', [sliced_input_shape.output[0], zeo, + minus_one]) + sliced_input_shape_new = ctx.make_node('Concat', [sliced_input_shape_half.output[0], one], + attr={'axis': -1}) + gb = GraphBuilder(ctx) + min_matrice_dim_ = gb.make_squeeze( + {'data': min_matrice_dim.output[0], "axes": [0]}, return_node=True) + matrice_range = ctx.make_node('Range', [zeo_, min_matrice_dim_.output[0], 
one_]) + unsqueezed_matrice_range = gb.make_unsqueeze( + {'data': matrice_range.output[0], "axes": [-1]}, return_node=True) + expanded_range = ctx.make_node('Expand', [unsqueezed_matrice_range.output[0], sliced_input_shape_new.output[0]]) + gathered_result = ctx.make_node('GatherElements', [sliced_input.output[0], expanded_range.output[0]], + attr={'axis': -1}) + shapes = node.output_shapes + dtypes = node.output_dtypes + ctx.remove_node(node.name) + gb.make_squeeze( + {'data': gathered_result.output[0], "axes": [-1], 'outputs': node.output}, return_node=True, + name=node.name, shapes=shapes, dtypes=dtypes) + + @classmethod + def version_11(cls, ctx, node, **kwargs): + cls.any_version(11, ctx, node, **kwargs) + + @classmethod + def version_13(cls, ctx, node, **kwargs): + # Parameters moved to inputs for operator Squeeze, Unsqueeze. + cls.any_version(13, ctx, node, **kwargs) + + +@tf_op(["MatrixDiagPartV2", "MatrixDiagPartV3"]) +class MatrixDiagPartV2V3: + @classmethod + def version_11(cls, ctx, node, **kwargs): + # assemble MatrixDiagPart V2&V3 by looping k diagonals with proper pads + minus_two, minus_one, zeo, one, two = \ + [n.output[0] for n in ctx.make_consts([[-2], [-1], [0], [1], [2]])] + + def normalize(): + raw_k = ctx.make_node('Cast', [node.input[1]], attr={'to': TensorProto.INT64}).output[0] + return ctx.make_node('Reshape', [raw_k, minus_one]).output[0] + + input_tensor = node.input[0] + k = normalize() + padding = node.input[2] + align = 'LEFT_LEFT' + if node.op.op_type == 'MatrixDiagPartV3': + align = node.get_attr_str('align') if 'align' in node.attr else 'LEFT_RIGHT' + input_rank = len(ctx.get_shape(input_tensor)) + raw_input_shape = [-1] * input_rank + per_loop_shape = raw_input_shape[:-1] + raw_output_shape = raw_input_shape[:-2] + [-1] + loop_output_shape = raw_output_shape + [-1] + ctx.set_shape(node.output[0], raw_output_shape) + for out in ctx.find_output_consumers(node.output[0]): + if out.op.op_type == 'Identity': + 
ctx.set_shape(out.output[0], raw_output_shape) + + # prepare new_shape of input + input_shape = ctx.make_node('Shape', [input_tensor]) + shape_input_shape = ctx.make_node('Shape', [input_shape.output[0]]) + matrix_shape = ctx.make_node('Slice', + [input_shape.output[0], minus_two, shape_input_shape.output[0]]) + min_dim = ctx.make_node('ReduceMin', [matrix_shape.output[0]]) + input_depth = ctx.make_node('Slice', [matrix_shape.output[0], minus_two, minus_one]) + input_width = ctx.make_node('Slice', [matrix_shape.output[0], minus_one, two]) + temp_shape = ctx.make_node('Concat', [minus_one, matrix_shape.output[0]], attr={'axis': 0}) + temp_input = ctx.make_node('Reshape', [input_tensor, temp_shape.output[0]]) + temp_transposed = ctx.make_node('Transpose', [temp_input.output[0]], attr={'perm': [0, 2, 1]}) + half_shape = ctx.make_node('Slice', [input_shape.output[0], zeo, minus_two]) + new_shape = ctx.make_node('Concat', [half_shape.output[0], input_width.output[0], input_depth.output[0]], + attr={'axis': 0}) + # define body graph for main loop + k_shape = ctx.make_node('Shape', [k]) + k_start = ctx.make_node('Slice', [k, zeo, one]) + k_end = ctx.make_node('Slice', [k, minus_one, k_shape.output[0]]) + raw_total_k = ctx.make_node('Sub', [k_end.output[0], k_start.output[0]]) + total_k = ctx.make_node('Add', [raw_total_k.output[0], one]) + trip_name = utils.make_name(node.name + "_i") + cond_name = utils.make_name(node.name + "_cond") + body_graph = ctx.create_new_graph_with_same_config() + body_graph.add_graph_input(trip_name, TensorProto.INT64, [1]) + body_graph.add_graph_input(cond_name, TensorProto.BOOL, []) + body_graph.parent_graph = ctx + # identity of input + identity_input_graph = body_graph.create_new_graph_with_same_config() + identity_input_graph.parent_graph = body_graph + identity_input = identity_input_graph.make_node('Identity', [input_tensor]) + identity_input_graph.add_graph_output(identity_input.output[0], ctx.get_dtype(node.input[0]), raw_input_shape) 
+ # transposed input + transposed_input_graph = body_graph.create_new_graph_with_same_config() + transposed_input_graph.parent_graph = body_graph + next_shape = transposed_input_graph.make_node('Concat', [half_shape.output[0], input_width.output[0], + input_depth.output[0]], attr={'axis': 0}) + transposed_input = transposed_input_graph.make_node('Reshape', + [temp_transposed.output[0], next_shape.output[0]]) + transposed_input_graph.add_graph_output(transposed_input.output[0], ctx.get_dtype(node.input[0]), + raw_input_shape) + # compute current k of the loop + current_k = body_graph.make_node('Sub', [k_end.output[0], trip_name]) + is_k_noneg = body_graph.make_node('Greater', [current_k.output[0], minus_one]) + branches = {'then_branch': identity_input_graph, 'else_branch': transposed_input_graph} + processed_input = body_graph.make_node('If', [is_k_noneg.output[0]], branches=branches) + processed_shape = body_graph.make_node('Shape', [processed_input.output[0]]) + shape_processed_shape = body_graph.make_node('Shape', [processed_shape.output[0]]) + new_depth = body_graph.make_node('Slice', + [processed_shape.output[0], minus_two, minus_one]) + new_width = body_graph.make_node('Slice', [processed_shape.output[0], minus_one, + shape_processed_shape.output[0]]) + abs_k = body_graph.make_node('Abs', [current_k.output[0]]) + + range_k = body_graph.make_node('Range', [abs_k.output[0], new_width.output[0], one], + domain="com.microsoft") + sliced_range = body_graph.make_node('Slice', [range_k.output[0], zeo, new_depth.output[0]]) + sliced_shape = body_graph.make_node('Shape', [sliced_range.output[0]]) + pad_length = body_graph.make_node('Sub', [new_depth.output[0], sliced_shape.output[0]]) + pad_length_2 = body_graph.make_node('Concat', [zeo, pad_length.output[0]], attr={'axis': 0}) + padded_range = body_graph.make_node('Pad', [sliced_range.output[0], pad_length_2.output[0]]) + # opset == 11, no need to change unsqueeze + unsqueezed_range = 
GraphBuilder(body_graph).make_unsqueeze( + {'data': padded_range.output[0], 'axes': [1]}, return_node=True) + half_shape_x = body_graph.make_node('Slice', + [new_shape.output[0], zeo, minus_two]) + shape_range = body_graph.make_node('Shape', [unsqueezed_range.output[0]]) + full_shape = body_graph.make_node('Concat', [half_shape_x.output[0], shape_range.output[0]], attr={'axis': 0}) + expanded_range = body_graph.make_node('Expand', [unsqueezed_range.output[0], full_shape.output[0]]) + gathered_input = body_graph.make_node('GatherElements', [processed_input.output[0], expanded_range.output[0]], + attr={'axis': -1}) + squeezed_input = GraphBuilder(body_graph).make_squeeze( + {'data': gathered_input.output[0], 'axes': [-1]}, return_node=True) + left_width = body_graph.make_node('Sub', [new_width.output[0], abs_k.output[0]]) + dims = body_graph.make_node('Concat', [left_width.output[0], new_depth.output[0]], attr={'axis': 0}) + valid_dim = body_graph.make_node('ReduceMin', [dims.output[0]]) + raw_output = body_graph.make_node('Slice', [squeezed_input.output[0], zeo, valid_dim.output[0], + minus_one]) + gap_output = body_graph.make_node('Sub', [min_dim.output[0], valid_dim.output[0]]) + gaps = body_graph.make_node('Concat', [zeo, gap_output.output[0]], attr={'axis': 0}) + processed_gap = body_graph.make_node('ReduceMax', [gaps.output[0]]) + pad_zero = body_graph.make_node('Mul', [new_shape.output[0], zeo]) + sliced_zero = body_graph.make_node('Slice', [pad_zero.output[0], zeo, minus_two]) + # gap_pos_k_graph + gap_pos_k_graph = body_graph.create_new_graph_with_same_config() + gap_pos_k_graph.parent_graph = body_graph + gap_pos_k = gap_pos_k_graph.make_node('Concat', [zeo, + processed_gap.output[0]], + attr={'axis': 0}) \ + if align.startswith('LEFT') \ + else gap_pos_k_graph.make_node('Concat', [processed_gap.output[0], + zeo], + attr={'axis': 0}) + gap_pos_k_graph.add_graph_output(gap_pos_k.output[0], TensorProto.INT64, [-1]) + # gap_neg_k_graph + gap_neg_k_graph = 
body_graph.create_new_graph_with_same_config() + gap_neg_k_graph.parent_graph = body_graph + gap_neg_k = gap_neg_k_graph.make_node('Concat', [zeo, + processed_gap.output[0]], + attr={'axis': 0}) \ + if align.endswith('LEFT') \ + else gap_neg_k_graph.make_node('Concat', [processed_gap.output[0], + zeo], + attr={'axis': 0}) + gap_neg_k_graph.add_graph_output(gap_neg_k.output[0], TensorProto.INT64, [-1]) + # pad output with gap + branches = {"then_branch": gap_pos_k_graph, "else_branch": gap_neg_k_graph} + gap_k = body_graph.make_node('If', [is_k_noneg.output[0]], branches=branches) + gap_left = body_graph.make_node('Slice', [gap_k.output[0], zeo, one]) + gap_right = body_graph.make_node('Slice', [gap_k.output[0], one, two]) + gap_all = body_graph.make_node('Concat', [sliced_zero.output[0], gap_left.output[0], sliced_zero.output[0], + gap_right.output[0]], attr={'axis': 0}) + padded_output = body_graph.make_node('Pad', [raw_output.output[0], gap_all.output[0], padding]) + cond_output = body_graph.make_node('Identity', [cond_name]) + body_graph.add_graph_output(cond_output.output[0], TensorProto.BOOL, []) + body_graph.add_graph_output(padded_output.output[0], ctx.get_dtype(node.input[0]), per_loop_shape) + body_graph.add_graph_output(gap_k.output[0], TensorProto.INT64, [-1]) + # make loop + cond_const = ctx.make_const(utils.make_name("cond"), np.ones((), dtype=np.bool)) + branches = {"body": body_graph} + main_loop = ctx.make_node('Loop', [total_k.output[0], cond_const.output[0]], output_count=2, branches=branches) + # reshape output + next_padded_shape = ctx.make_node('Concat', [total_k.output[0], minus_one, min_dim.output[0]], + attr={'axis': 0}) + reshaped_padded = ctx.make_node('Reshape', [main_loop.output[0], next_padded_shape.output[0]]) + transposed_padded = ctx.make_node('Transpose', [reshaped_padded.output[0]], attr={'perm': [1, 0, 2]}) + output_shape = ctx.make_node('Concat', [half_shape.output[0], total_k.output[0], minus_one], + attr={'axis': 0}) + 
reshaped_output = ctx.make_node('Reshape', [transposed_padded.output[0], output_shape.output[0]]) + # compute pads + left_pads = ctx.make_node('Slice', [main_loop.output[1], minus_two, minus_one, + minus_one]) + flattened_left_pads = ctx.make_node('Reshape', [left_pads.output[0], minus_one]) + min_left_pads = ctx.make_node('ReduceMin', [flattened_left_pads.output[0]]) + right_pads = ctx.make_node('Slice', [main_loop.output[1], minus_one, two, + minus_one]) + flattened_right_pads = ctx.make_node('Reshape', [right_pads.output[0], minus_one]) + min_right_pads = ctx.make_node('ReduceMin', [flattened_right_pads.output[0]]) + # trim left pads + identity_left_sliced_graph = ctx.create_new_graph_with_same_config() + identity_left_sliced_graph.parent_graph = ctx + identity_left_sliced = identity_left_sliced_graph.make_node('Identity', [reshaped_output.output[0]]) + identity_left_sliced_graph.add_graph_output(identity_left_sliced.output[0], ctx.get_dtype(node.input[0]), + loop_output_shape) + output_left_sliced_graph = ctx.create_new_graph_with_same_config() + output_left_sliced_graph.parent_graph = ctx + output_left_sliced = output_left_sliced_graph.make_node('Slice', + [reshaped_output.output[0], min_left_pads.output[0], + min_dim.output[0], minus_one]) + output_left_sliced_graph.add_graph_output(output_left_sliced.output[0], ctx.get_dtype(node.input[0]), + loop_output_shape) + left_pads_greater_than_zero = ctx.make_node('Greater', [min_left_pads.output[0], zeo]) + branches = {"then_branch": output_left_sliced_graph, "else_branch": identity_left_sliced_graph} + final_output_left_sliced = ctx.make_node('If', [left_pads_greater_than_zero.output[0]], branches=branches) + # trim right pads + valid_right_dim = ctx.make_node('Sub', [min_dim.output[0], min_right_pads.output[0]]) + identity_right_sliced_graph = ctx.create_new_graph_with_same_config() + identity_right_sliced_graph.parent_graph = ctx + identity_right_sliced = identity_right_sliced_graph.make_node('Identity', 
[final_output_left_sliced.output[0]]) + identity_right_sliced_graph.add_graph_output(identity_right_sliced.output[0], ctx.get_dtype(node.input[0]), + loop_output_shape) + output_right_sliced_graph = ctx.create_new_graph_with_same_config() + output_right_sliced_graph.parent_graph = ctx + output_right_sliced = output_right_sliced_graph.make_node('Slice', [final_output_left_sliced.output[0], + zeo, + valid_right_dim.output[0], + minus_one]) + output_right_sliced_graph.add_graph_output(output_right_sliced.output[0], ctx.get_dtype(node.input[0]), + loop_output_shape) + right_dim_greater_than_valid = ctx.make_node('Greater', [min_dim.output[0], valid_right_dim.output[0]]) + branches = {"then_branch": output_right_sliced_graph, "else_branch": identity_right_sliced_graph} + final_output_right_sliced = ctx.make_node('If', [right_dim_greater_than_valid.output[0]], branches=branches) + # squeeze output + latest_shape = ctx.make_node('Shape', [final_output_right_sliced.output[0]]) + latest_depth = ctx.make_node('Slice', + [latest_shape.output[0], minus_two, minus_one]) + need_squeeze = ctx.make_node('Equal', [latest_depth.output[0], one]) + identity_sliced_graph = ctx.create_new_graph_with_same_config() + identity_sliced_graph.parent_graph = ctx + identity_sliced = identity_sliced_graph.make_node('Identity', [final_output_right_sliced.output[0]]) + identity_sliced_graph.add_graph_output(identity_sliced.output[0], ctx.get_dtype(node.input[0]), + raw_output_shape + [-1]) + squeeze_sliced_graph = ctx.create_new_graph_with_same_config() + squeeze_sliced_graph.parent_graph = ctx + squeeze_sliced = GraphBuilder(squeeze_sliced_graph).make_squeeze( + {'data': final_output_right_sliced.output[0], 'axes': [-2]}, return_node=True) + squeeze_sliced_graph.add_graph_output(squeeze_sliced.output[0], ctx.get_dtype(node.input[0]), raw_output_shape) + shapes = node.output_shapes + dtypes = node.output_dtypes + ctx.remove_node(node.name) + branches = {"then_branch": squeeze_sliced_graph, 
"else_branch": identity_sliced_graph} + squeeze_if = ctx.make_node('If', [need_squeeze.output[0]], name=node.name, outputs=node.output, shapes=shapes, + dtypes=dtypes, branches=branches) + + @classmethod + def any_version_after12(cls, opset, ctx, node, **kwargs): + + # assemble MatrixDiagPart V2&V3 + m = node.input[0] + m_shape = ctx.get_shape(m) + m_rank = len(m_shape) + pads = np.zeros(2 * m_rank, dtype=np.int64) + pads[-2:] = [1, 1] + utils.make_sure(m_rank > 1, 'Input data should be at least 2D %s', str(m_shape)) + + align = 'LEFT_LEFT' + if node.op.op_type == 'MatrixDiagPartV3': + align = node.get_attr_str('align') if 'align' in node.attr else 'LEFT_RIGHT' + xalign, yalign = align.split('_') + + # consts + const_zero_float, const_neg_one_float = [n.output[0] for n in ctx.make_consts([0, -1], np.float32)] + const_zero, const_one, const_neg_one, const_neg_two, const_pad_vals, const_t = \ + [n.output[0] for n in ctx.make_consts([[0], [1], [-1], [-2], pads, [-1, 1]])] + const_zero_scalar, const_one_scalar, const_neg_one_scalar = \ + [n.output[0] for n in ctx.make_consts([0, 1, -1])] + + m_shape = ctx.make_node('Shape', [node.input[0]]).output[0] + xlen = ctx.make_node('Gather', [m_shape, const_neg_one]).output[0] + ylen = ctx.make_node('Gather', [m_shape, const_neg_two]).output[0] + xlenp = ctx.make_node('Add', [xlen, const_one]).output[0] + stride = ctx.make_node('Add', [xlenp, const_one]).output[0] + minxy_0 = ctx.make_node('Concat', [xlen, ylen], attr={'axis': 0}).output[0] + minxy = ctx.make_node('ReduceMin', [minxy_0]).output[0] + minxy_float = ctx.make_node('Cast', [minxy], attr={'to': TensorProto.FLOAT}).output[0] + xmax_0 = ctx.make_node('Mul', [xlen, xlenp]).output[0] + xmax_1 = ctx.make_node('Add', [xmax_0, xlenp]).output[0] + xmax = ctx.make_node('Add', [xmax_1, const_neg_one]).output[0] + ymax_0 = ctx.make_node('Mul', [xlenp, ylen]).output[0] + ymax = ctx.make_node('Add', [ymax_0, const_neg_one]).output[0] + ymax_float = ctx.make_node('Cast', [ymax], 
attr={'to': TensorProto.FLOAT}).output[0] + partial_shape = ctx.make_node('Slice', [m_shape, const_zero, const_neg_two]).output[0] + m2_shape = ctx.make_node('Concat', [partial_shape, const_neg_one], attr={'axis': 0}).output[0] + gather_shape = ctx.make_node('Concat', [partial_shape, const_one], attr={'axis': 0}).output[0] + + def normalize(): + raw_input1 = ctx.make_node('Cast', [node.input[1]], attr={'to': TensorProto.INT64}).output[0] + return ctx.make_node('Reshape', [raw_input1, const_neg_one]) + + # get k0, k1 values. diags to be extracted + input1 = normalize() + k0 = ctx.make_node('ReduceMin', [input1.output[0]]).output[0] + k1 = ctx.make_node('ReduceMax', [input1.output[0]]).output[0] + k0_scalar = ctx.make_node('Squeeze', [k0]).output[0] + k1_scalar = ctx.make_node('Squeeze', [k1]).output[0] + m_padded = ctx.make_node('Pad', [m, const_pad_vals, node.input[2]]) + + # starting indexes for super diagonals + xstart_0 = ctx.make_node('Cast', [k0_scalar], attr={'to': TensorProto.FLOAT}) + xstart_1 = ctx.make_node('Max', [const_zero_float, xstart_0.output[0]]) + xstart_2 = ctx.make_node('Cast', [xstart_1.output[0]], attr={'to': TensorProto.INT64}) + xstart_3 = ctx.make_node('Add', [xstart_2.output[0], const_neg_one_scalar]) + xstart_4 = ctx.make_node('Range', [k1_scalar, xstart_3.output[0], const_neg_one_scalar]) + xstart = ctx.make_node('Reshape', [xstart_4.output[0], const_t]) + + # starting indexes for sub diagonals + ystart_0 = ctx.make_node('Cast', [k1_scalar], attr={'to': TensorProto.FLOAT}) + ystart_1 = ctx.make_node('Min', [const_neg_one_float, ystart_0.output[0]]) + ystart_2 = ctx.make_node('Cast', [ystart_1.output[0]], attr={'to': TensorProto.INT64}) + ystart_3 = ctx.make_node('Add', [k0_scalar, const_neg_one_scalar]) + ystart_4 = ctx.make_node('Range', [ystart_2.output[0], ystart_3.output[0], const_neg_one_scalar]) + ystart = ctx.make_node('Reshape', [ystart_4.output[0], const_t]) + + xmax_0 = ctx.make_node('Mul', [xstart.output[0], xlenp]) + xmax = 
ctx.make_node('Sub', [xmax, xmax_0.output[0]]) + xmax_float = ctx.make_node('Cast', [xmax.output[0]], attr={'to': TensorProto.FLOAT}) + + # lengths of super/sub diags to extract + xsize_0 = ctx.make_node('Sub', [xlen, xstart.output[0]]) + xsize_1 = ctx.make_node('Cast', [xsize_0.output[0]], attr={'to': TensorProto.FLOAT}) + xsize_2 = ctx.make_node('Min', [xsize_1.output[0], minxy_float]) + xsize = ctx.make_node('Cast', [xsize_2.output[0]], attr={'to': TensorProto.INT64}) + ysize_0 = ctx.make_node('Add', [ylen, ystart.output[0]]) + ysize_1 = ctx.make_node('Cast', [ysize_0.output[0]], attr={'to': TensorProto.FLOAT}) + ysize_2 = ctx.make_node('Min', [ysize_1.output[0], minxy_float]) + ysize = ctx.make_node('Cast', [ysize_2.output[0]], attr={'to': TensorProto.INT64}) + diagsize = ctx.make_node('Concat', [xsize.output[0], ysize.output[0]], attr={'axis': 0}) + maxsize = ctx.make_node('ReduceMax', [diagsize.output[0]], attr={'keep_dims': 0}) + maxsize_0 = ctx.make_node('Reshape', [maxsize.output[0], const_neg_one]) + maxsize_scalar = ctx.make_node('Squeeze', [maxsize.output[0]]) + + diagdistances_0 = ctx.make_node('Range', [const_zero_scalar, maxsize_scalar.output[0], const_one_scalar]) + diagdistances = ctx.make_node('Mul', [diagdistances_0.output[0], stride]) + + def right_align(sizes, indices, starts, maxval): + op1 = ctx.make_node('Sub', [maxsize.output[0], sizes.output[0]]) + op2 = ctx.make_node('Mul', [op1.output[0], stride]) + op3 = ctx.make_node('Sub', [indices.output[0], op2.output[0]]) + op4 = ctx.make_node('Less', [op3.output[0], starts.output[0]]) + op5 = ctx.make_node('Where', [op4.output[0], maxval, op3.output[0]]) + return op5 + + # xdiags, ydiags contain indices of diagonal elements + xdiags_0 = ctx.make_node('Add', [xstart.output[0], diagdistances.output[0]]) + xdiags_1 = ctx.make_node('Cast', [xdiags_0.output[0]], attr={'to': TensorProto.FLOAT}) + if xalign == 'RIGHT': + xdiags = right_align(xsize, xdiags_0, xstart, ymax) + else: + xdiags_2 = 
ctx.make_node('Min', [xdiags_1.output[0], xmax_float.output[0]]) + xdiags = ctx.make_node('Cast', [xdiags_2.output[0]], attr={'to': TensorProto.INT64}) + + ydiags_0_ = ctx.make_node('Abs', [ystart.output[0]]) + ydiags_1 = ctx.make_node('Mul', [ydiags_0_.output[0], xlenp]) + ydiags_2 = ctx.make_node('Add', [ydiags_1.output[0], diagdistances.output[0]]) + ydiags_3 = ctx.make_node('Cast', [ydiags_2.output[0]], attr={'to': TensorProto.FLOAT}) + if yalign == 'RIGHT': + ydiags = right_align(ysize, ydiags_2, ydiags_1, ymax) + else: + ydiags_4 = ctx.make_node('Min', [ydiags_3.output[0], ymax_float]) + ydiags = ctx.make_node('Cast', [ydiags_4.output[0]], attr={'to': TensorProto.INT64}) + + # flatten last dimension of matrix + m2 = ctx.make_node('Reshape', [m_padded.output[0], m2_shape]) + + diags_0 = ctx.make_node('Concat', [xdiags.output[0], ydiags.output[0]], attr={'axis': 0}) + diags_1 = ctx.make_node('Reshape', [diags_0.output[0], const_neg_one]) + diags_2 = ctx.make_node('Expand', [diags_1.output[0], gather_shape]) + diags = ctx.make_node('GatherElements', [m2.output[0], diags_2.output[0]], attr={'axis': -1}) + + def compute_out_shape(k0_k1_same=False): + g = ctx.create_new_graph_with_same_config() + g.parent_graph = ctx + if k0_k1_same: + dims = [partial_shape, maxsize_0.output[0]] + else: + dims = [partial_shape, const_neg_one, maxsize_0.output[0]] + outshape = g.make_node('Concat', dims, attr={'axis': 0}) + g.add_graph_output(outshape.output[0], TensorProto.INT64, [-1]) + return g + + # if k0=k1, rank of output matrix is 1 less than usual + # hence, need 'If' to compute right output matrix shape + k0_k1_same = ctx.make_node('Equal', [k1, k0]) + branches = {'then_branch': compute_out_shape(True), 'else_branch': compute_out_shape(False)} + if_node = ctx.make_node('If', [k0_k1_same.output[0]], branches=branches) + + shapes = ctx.get_shape(node.output[0]) + dtypes = node.output_dtypes + ctx.remove_node(node.name) + ctx.make_node('Reshape', [diags.output[0], 
if_node.output[0]], name=node.name, outputs=node.output, + shapes=[shapes], dtypes=dtypes) + + for consumer in ctx.find_output_consumers(node.output[0]): + if consumer.type == 'Identity': + ctx.set_shape(consumer.output[0], shapes) + + @classmethod + def version_12(cls, ctx, node, **kwargs): + cls.any_version_after12(12, ctx, node, **kwargs) + + @classmethod + def version_13(cls, ctx, node, **kwargs): + # Parameters moved to inputs for operator Squeeze, Unsqueeze. + cls.any_version_after12(13, ctx, node, **kwargs) + + +@tf_op(["MatrixDiag", "MatrixDiagV2", "MatrixDiagV3"]) +class MatrixDiag: + @classmethod + def version_12(cls, ctx, node, **kwargs): + # Assemble MatrixDiagV3 by ReverseSequence + argc = len(node.input) + + if ctx.opset >= 13: + squeeze_axes0 = ctx.make_const(utils.make_name("const_axes"), np.array([0], dtype=np.int64)).output[0] + squeeze_axes_1 = ctx.make_const(utils.make_name("const_axes"), np.array([-1], dtype=np.int64)).output[0] + squeeze_axes_2 = ctx.make_const(utils.make_name("const_axes"), np.array([-2], dtype=np.int64)).output[0] + + minus_two, minus_one, zeo, one, two = \ + [n.output[0] for n in ctx.make_consts([[-2], [-1], [0], [1], [2]])] + + def mknode(op, args, **kwargs): + return ctx.make_node(op, args, **kwargs).output[0] + + def mknode2(g, op, args, **kwargs): + return g.make_node(op, args, **kwargs).output[0] + + def normalize(name): + # normalize arguments + casted = mknode("Cast", [name], attr={'to': TensorProto.INT64}) + reshaped = mknode("Reshape", [casted, minus_one]) + return reshaped + + def cast(name): + return mknode("Cast", [name], attr={"to": ctx.get_dtype(node.input[0])}) + + def processdiag(): + # unsqueeze diag if necessary + diag = node.input[0] + shape = ctx.get_shape(diag) + if len(shape) == 1: + if ctx.opset < 13: + diag = mknode("Unsqueeze", [diag], attr={"axes": [0]}) + else: + diag = mknode("Unsqueeze", [diag, squeeze_axes0]) + shape = [1] + shape + ctx.set_shape(diag, shape) + + diag_shape = mknode("Shape", 
[diag]) + diag_depth = mknode("Slice", [diag_shape, minus_two, minus_one]) + k = normalize(node.input[1]) if argc > 1 else zeo + k_min, k_max = mknode("ReduceMin", [k]), mknode("ReduceMax", [k]) + k_max_nxt = mknode("Add", [k_max, one]) + k_depth = mknode("Sub", [k_max_nxt, k_min]) + equal = mknode("Equal", [k_depth, diag_depth]) + + def id_diag(): + g = ctx.create_new_graph_with_same_config() + g.parent_graph = ctx + idt = mknode2(g, "Identity", [diag]) + g.add_graph_output(idt, ctx.get_dtype(node.input[0]), ctx.get_shape(diag)) + return g + + def ex_diag(): + g = ctx.create_new_graph_with_same_config() + g.parent_graph = ctx + if ctx.opset < 13: + ex = mknode2(g, "Unsqueeze", [diag], attr={"axes": [-2]}) + else: + ex = mknode2(g, "Unsqueeze", [diag, squeeze_axes_2]) + rank = len(ctx.get_shape(diag)) + 1 + g.add_graph_output(ex, ctx.get_dtype(node.input[0]), [-1] * rank) + return g + + branches = {"then_branch": id_diag(), "else_branch": ex_diag()} + expand_diag = ctx.make_node("If", [equal], branches=branches) + return expand_diag.output[0], k, k_min, k_max, k_max_nxt + + def squeeze_12(name): + return ctx.make_node("Squeeze", [name], attr={"axis": -1}).output[0] + + def squeeze_13(name): + return ctx.make_node("Squeeze", [name, squeeze_axes_1]).output[0] + + squeeze = squeeze_12 if ctx.opset < 13 else squeeze_13 + + # gather inputs + diag, k, k_min, k_max, k_max_nxt = processdiag() + row, col, pad, align = normalize(node.input[2]) if argc > 2 else minus_one, \ + normalize(node.input[3]) if argc > 3 else minus_one, \ + node.input[4] if argc > 4 else cast(zeo), \ + node.get_attr_str("align") if "align" in node.attr else "LEFT_LEFT" + + diag_shape = mknode("Shape", [diag]) + diag_rank = mknode("Shape", [diag_shape]) + head_shape = mknode("Slice", [diag_shape, zeo, minus_two]) + tail_shape = mknode("Slice", [diag_shape, minus_two, diag_rank]) + diag_width = mknode("Slice", [diag_shape, minus_one, diag_rank]) + diag_depth = mknode("Slice", [diag_shape, minus_two, 
minus_one]) + k_range = mknode("Range", [squeeze(k_min), squeeze(k_max_nxt), squeeze(one)]) + abs_k_range = mknode("Abs", [k_range]) + min_k2zeo = mknode("ReduceMin", [abs_k_range]) + max_diag_len = mknode("Add", [min_k2zeo, diag_width]) + + def outrowcol(): + # get output matrix shape + row_set = mknode("Greater", [row, zeo]) + col_set = mknode("Greater", [col, zeo]) + + def rowset(): + # if row is set + g = ctx.create_new_graph_with_same_config() + g.parent_graph = ctx + + def rowsetcolset(): + # if col is set + gg = g.create_new_graph_with_same_config() + id_row = mknode2(gg, "Identity", [row]) + id_col = mknode2(gg, "Identity", [col]) + shape = mknode2(gg, "Concat", [id_row, id_col], attr={"axis": -1}) + gg.parent_graph = g + gg.add_graph_output(shape, TensorProto.INT64, [-1]) + return gg + + def rowsetcolnotset(): + # if col is not set + gg = g.create_new_graph_with_same_config() + gg.parent_graph = g + id_row = mknode2(gg, "Identity", [row]) + id_diag_width = mknode2(gg, "Identity", [diag_width]) + shape = mknode2(gg, "Concat", [id_row, id_diag_width], attr={"axis": -1}) + gg.add_graph_output(shape, TensorProto.INT64, [-1]) + return gg + + branches = {"then_branch": rowsetcolset(), "else_branch": rowsetcolnotset()} + if_col_set = g.make_node("If", [col_set], branches=branches) + g.add_graph_output(if_col_set.output[0], TensorProto.INT64, [-1]) + return g + + def rownotset(): + # if row is not set + g = ctx.create_new_graph_with_same_config() + g.parent_graph = ctx + + def rownotsetcolset(): + # if col is set + gg = g.create_new_graph_with_same_config() + gg.parent_graph = g + id_diag_width = gg.make_node("Identity", [diag_width]).output[0] + id_col = gg.make_node("Identity", [col]).output[0] + shape = gg.make_node("Concat", [id_diag_width, id_col], attr={"axis": -1}).output[0] + gg.add_graph_output(shape, TensorProto.INT64, [-1]) + return gg + + def rownotsetcolnotset(): + # if col is not set + gg = g.create_new_graph_with_same_config() + gg.parent_graph = g 
+ id_max_diag_len = gg.make_node("Identity", [max_diag_len]).output[0] + shape = gg.make_node("Concat", [id_max_diag_len, id_max_diag_len], attr={"axis": -1}).output[0] + gg.add_graph_output(shape, TensorProto.INT64, [-1]) + return gg + + branches = {"then_branch": rownotsetcolset(), "else_branch": rownotsetcolnotset()} + if_col_set = g.make_node("If", [col_set], branches=branches) + g.add_graph_output(if_col_set.output[0], TensorProto.INT64, [-1]) + return g + + branches = {"then_branch": rowset(), "else_branch": rownotset()} + if_row_set = ctx.make_node("If", [row_set], branches=branches) + return if_row_set.output[0] + + out_shape = outrowcol() + out_row = mknode("Slice", [out_shape, zeo, one]) + out_col = mknode("Slice", [out_shape, one, two]) + k_btm = mknode("Sub", [one, out_row]) # lowest possible k + + def getklens(): + # return diag len of all ks + rwcl_min = mknode("Min", [out_row, out_col]) + rwcl_gap = mknode("Sub", [out_row, out_col]) + absl_gap = mknode("Abs", [rwcl_gap]) + left_btm = mknode("Range", [squeeze(one), squeeze(rwcl_min), squeeze(one)]) + riht_top = mknode("Abs", [mknode("Sub", [left_btm, rwcl_min])]) + klen_mid = mknode("Expand", [rwcl_min, mknode("Add", [absl_gap, one])]) + return mknode("Concat", [left_btm, klen_mid, riht_top], attr={"axis": -1}) + + k_lens = getklens() + + def reverseseq(args): + return mknode("ReverseSequence", args, attr={"batch_axis": 0, "time_axis": 1}) + + def reverse1d(name): + # reverse an array + shape = mknode("Shape", [name]) + temp_shape = mknode("Concat", [minus_one, shape], attr={"axis": -1}) + reshaped = mknode("Reshape", [name, temp_shape]) + rev = reverseseq([reshaped, shape]) + return mknode("Reshape", [rev, shape]) + + def sortdiag(): + # sort diag to "LEFT_RIGHT" so each col form a line of the out matrix + k_sup_stt = mknode("Sub", [mknode("Max", [zeo, k_min]), k_btm]) + k_sup_end = mknode("Sub", [k_max_nxt, k_btm]) + k_sup_len = mknode("Max", [zeo, mknode("Sub", [k_sup_end, k_sup_stt])]) + k_sub_stt 
= mknode("Sub", [k_min, k_btm]) + k_sub_end = mknode("Sub", [mknode("Min", [zeo, k_max_nxt]), k_btm]) + k_sub_len = mknode("Max", [zeo, mknode("Sub", [k_sub_end, k_sub_stt])]) + sup_k_lens = mknode("Slice", [k_lens, k_sup_stt, k_sup_end]) + sub_k_lens = mknode("Slice", [k_lens, k_sub_stt, k_sub_end]) + all_k_lens = mknode("Concat", [sub_k_lens, sup_k_lens], attr={"axis": -1}) + max_k_len = mknode("ReduceMax", [all_k_lens]) + top_k_len = mknode("Slice", [all_k_lens, minus_one, diag_depth]) + btm_k_len = mknode("Slice", [all_k_lens, zeo, one]) + diag_rev_shap = mknode("Concat", [minus_one, diag_width], attr={"axis": -1}) + reshaped_diag = mknode("Reshape", [diag, diag_rev_shap]) + rev_shape = mknode("Slice", [diag_shape, zeo, minus_one]) + + sup_rev_len_1 = mknode("Expand", [one, k_sup_len]) if align.startswith("LEFT") else mknode("Expand", + [diag_width, + k_sup_len]) + sub_rev_len_1 = mknode("Expand", [one, k_sub_len]) if align.endswith("RIGHT") else sub_k_lens + cnt_rev_len_1 = mknode("Concat", [sub_rev_len_1, sup_rev_len_1], attr={"axis": -1}) + exp_rev_len_1 = mknode("Expand", [reverse1d(cnt_rev_len_1), rev_shape]) + + sup_rev_len_2 = mknode("Expand", [one, k_sup_len]) if align.startswith("LEFT") else sup_k_lens + sub_rev_len_2 = mknode("Expand", [one, k_sub_len]) if align.endswith("RIGHT") else mknode("Expand", + [diag_width, + k_sub_len]) + cnt_rev_len_2 = mknode("Concat", [sub_rev_len_2, sup_rev_len_2], attr={"axis": -1}) + exp_rev_len_2 = mknode("Expand", [reverse1d(cnt_rev_len_2), rev_shape]) + + reversed_diag_1 = reverseseq([reshaped_diag, mknode("Reshape", [exp_rev_len_1, minus_one])]) + reversed_diag_2 = reverseseq([reversed_diag_1, mknode("Reshape", [exp_rev_len_2, minus_one])]) + + return mknode("Reshape", [reversed_diag_2, diag_shape]), \ + mknode("Sub", [max_k_len, top_k_len]), \ + mknode("Sub", [max_k_len, btm_k_len]) + + sorted_diag, top_pad, btm_pad = sortdiag() + + def trandiag(): + # transpose last two dim of diag + temp_shape = mknode("Concat", 
[minus_one, tail_shape], attr={"axis": -1}) + reshaped = mknode("Reshape", [sorted_diag, temp_shape]) + transposed = mknode("Transpose", [reshaped], attr={"perm": [0, 2, 1]}) + out_shape = mknode("Concat", [head_shape, reverse1d(tail_shape)], attr={"axis": -1}) + return mknode("Reshape", [transposed, out_shape]) + + tran_diag = trandiag() + + def relu1(name): + # all return values >= 1 + minusd = mknode("Sub", [name, one]) + casted = mknode("Cast", [minusd], attr={"to": TensorProto.FLOAT}) + relued = mknode("Relu", [casted]) + casted = mknode("Cast", [relued], attr={"to": TensorProto.INT64}) + return mknode("Add", [casted, one]) + + def makediagonal(): + # padding with required value and move lines so they form diagonals + shape = mknode("Shape", [tran_diag]) + rank = mknode("Shape", [shape]) + width = mknode("Slice", [shape, minus_one, rank]) + temp_shape = mknode("Concat", [minus_one, width], attr={"axis": -1}) + reshaped = mknode("Reshape", [tran_diag, temp_shape]) + left_pad, riht_pad = top_pad, mknode("Add", [btm_pad, diag_width]) + full_pad = mknode("Concat", [zeo, left_pad, zeo, riht_pad], attr={"axis": -1}) + diag_pad = mknode("Pad", [reshaped, full_pad, pad]) + diag_pad_shape = mknode("Shape", [diag_pad]) + diag_pad_width = mknode("Slice", [diag_pad_shape, one, two]) + exp_shape = mknode("Concat", [head_shape, diag_width], attr={"axis": -1}) + + def padleft(): + # set pads from left + fm = mknode("Add", [left_pad, left_pad]) + to = mknode("Sub", [fm, diag_width]) + rg = reverse1d(relu1(mknode("Range", [squeeze(fm), squeeze(to), squeeze(minus_one)]))) + expanded_range = mknode("Expand", [rg, exp_shape]) + reshaped_range = mknode("Reshape", [expanded_range, minus_one]) + pad_left = mknode("ReverseSequence", [diag_pad, reshaped_range], attr={"batch_axis": 0, "time_axis": 1}) + return mknode("Slice", [pad_left, left_pad, diag_pad_width, one]) + + pad_left = padleft() + + def padright(): + # set pads from right + pad_left_shape = mknode("Shape", [pad_left]) + 
pad_left_depth = mknode("Slice", [pad_left_shape, zeo, one]) + pad_left_width = mknode("Slice", [pad_left_shape, one, two]) + pad_full_lenth = mknode("Expand", [pad_left_width, pad_left_depth]) + rev = mknode("ReverseSequence", [pad_left, pad_full_lenth], attr={"batch_axis": 0, "time_axis": 1}) + fm = mknode("Add", [riht_pad, btm_pad]) + to = mknode("Sub", [fm, diag_width]) + rg = mknode("Range", [squeeze(fm), squeeze(to), squeeze(minus_one)]) + expanded_range = mknode("Expand", [rg, exp_shape]) + reshaped_range = mknode("Reshape", [expanded_range, minus_one]) + raw_pad_right = mknode("ReverseSequence", [rev, reshaped_range], + attr={"batch_axis": 0, "time_axis": 1}) + shape = mknode("Shape", [raw_pad_right]) + width = mknode("Slice", [shape, one, two]) + sliced = mknode("Slice", [raw_pad_right, btm_pad, width, one]) + all_width = mknode("Expand", [mknode("Sub", [width, btm_pad]), mknode("Shape", [reshaped_range])]) + return mknode("ReverseSequence", [sliced, all_width], attr={"batch_axis": 0, "time_axis": 1}) + + pad_right = padright() + + def diagonize(): + # move lines to right to form diagonals + fm = mknode("Sub", [diag_depth, btm_pad]) + to = mknode("Add", [fm, diag_width]) + rg = mknode("Range", [squeeze(fm), squeeze(to), squeeze(one)]) + expanded_range = mknode("Expand", [rg, exp_shape]) + reshaped_range = mknode("Reshape", [expanded_range, minus_one]) + rev = mknode("ReverseSequence", [pad_right, reshaped_range], + attr={"batch_axis": 0, "time_axis": 1}) + k_max_idx = mknode("Sub", [k_max, k_btm]) + k_max_idx_nxt = mknode("Add", [k_max_idx, one]) + k_max_len = mknode("Slice", [k_lens, k_max_idx, k_max_idx_nxt]) + k_gap = mknode("Sub", [mknode("Abs", [k_max]), min_k2zeo]) + width = mknode("Add", [k_max_len, k_gap]) + return mknode("Slice", [rev, zeo, width, one]), width + + diag, width = diagonize() + shape = mknode("Concat", [head_shape, diag_width, minus_one], attr={"axis": -1}) + return mknode("Reshape", [diag, shape]), diag_width, width + + new_diag, 
new_depth, new_width = makediagonal() + + def paddiag(): + # pad to output shape + pad_row, pad_col = mknode("Sub", [out_row, new_depth]), mknode("Sub", [out_col, new_width]) + pad_top = mknode("Max", [zeo, mknode("Sub", [zeo, k_max])]) + pad_lft = mknode("Max", [zeo, mknode("Sub", [k_min, zeo])]) + pad_btm = mknode("Sub", [pad_row, pad_top]) + pad_rht = mknode("Sub", [pad_col, pad_lft]) + pad_hlf = mknode("Mul", [zeo, head_shape]) + pad_ful = mknode("Concat", [pad_hlf, pad_top, pad_lft, pad_hlf, pad_btm, pad_rht], attr={"axis": -1}) + return mknode("Pad", [new_diag, pad_ful, pad]) + + padded = paddiag() + shapes = node.output_shapes + dtypes = node.output_dtypes + ctx.remove_node(node.name) + ctx.make_node("Identity", [padded], name=node.name, + outputs=node.output, shapes=shapes, dtypes=dtypes) + + @classmethod + def version_13(cls, ctx, node, **kwargs): + # Parameters moved to inputs for operator Squeeze, Unsqueeze. + cls.version_12(ctx, node, **kwargs) + + +@tf_op("MatrixSetDiagV3") +class MatrixSetDiagV3: + @classmethod + def version_12(cls, ctx, node, **kwargs): + # Assemble MatrixSetDiagV3 by MatrixDiagPartV3 and MatrixDiagV3 + + minus_two, minus_one, zeo, one = \ + [n.output[0] for n in ctx.make_consts([[-2], [-1], [0], [1]])] + + def mknode(op, args, **kwargs): + return ctx.make_node(op, args, **kwargs).output[0] + + def integer(name): + return mknode("Cast", [name], attr={"to": TensorProto.INT64}) + + def cast(name): + return mknode("Cast", [name], attr={"to": ctx.get_dtype(node.input[0])}) + + def normalize(): + k = node.input[2] + casted = mknode("Cast", [k], attr={"to": TensorProto.INT64}) + return mknode("Reshape", [casted, minus_one]) + + x = node.input[0] + diag = node.input[1] + k = normalize() + attr = {"align": node.get_attr_str("align")} + + shape = mknode("Shape", [x]) + rank = mknode("Shape", [shape]) + row = mknode("Slice", [shape, minus_two, minus_one]) + col = mknode("Slice", [shape, minus_one, rank]) + + # ones of x shape + zeos = 
mknode("Mul", [integer(x), zeo]) + ones = mknode("Add", [zeos, one]) + + # make diag of 1s + ones_diag = ctx.make_node("MatrixDiagPartV3", [ones, k, zeo], attr) + MatrixDiagPartV2V3.version_11(ctx, ones_diag) + # MatrixDiagPartV2V3.version_12(ctx, ones_diag) # todo: fix exception + + # make matrix of bool + ctx.set_dtype(ones_diag.output[0], TensorProto.INT64) + ones_matrix = ctx.make_node("MatrixDiagV3", [ones_diag.output[0], k, row, col, zeo], attr) + MatrixDiag.version_12(ctx, ones_matrix) + ones_bool = mknode("Equal", [ones_matrix.output[0], one]) + + # make matrix out of diag + diag_matrix = ctx.make_node("MatrixDiagV3", [diag, k, row, col, cast(zeo)], attr) + MatrixDiag.version_12(ctx, diag_matrix) + + shapes = node.output_shapes + dtypes = node.output_dtypes + ctx.remove_node(node.name) + mknode("Where", [ones_bool, diag_matrix.output[0], x], + name=node.name, outputs=node.output, shapes=shapes, dtypes=dtypes) + + +@tf_op("BroadcastTo") +class BroadcastTo: + @classmethod + def version_8(cls, ctx, node, **kwargs): + # broadcast by expanding + node.type = "Expand" + ctx.insert_new_node_on_input(node, "Cast", node.input[1], to=TensorProto.INT64) diff --git a/lib/python3.10/site-packages/tf2onnx/onnx_opset/traditionalml.py b/lib/python3.10/site-packages/tf2onnx/onnx_opset/traditionalml.py new file mode 100644 index 0000000000000000000000000000000000000000..84e647a27e30b288e14e9e56daa3e96ee5ef2ad7 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/onnx_opset/traditionalml.py @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: Apache-2.0 + + +""" +traditional ml +""" + +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import logging + +logger = logging.getLogger(__name__) diff --git a/lib/python3.10/site-packages/tf2onnx/optimizer/__init__.py b/lib/python3.10/site-packages/tf2onnx/optimizer/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..4faa488f9a1ddfd02f27c56ea1bf513a36832345 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/optimizer/__init__.py @@ -0,0 +1,79 @@ +# SPDX-License-Identifier: Apache-2.0 + +"""tf2onnx.optimizer module""" + +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from collections import OrderedDict +import copy + +from .const_fold_optimizer import ConstFoldOptimizer +from .identity_optimizer import IdentityOptimizer +from .merge_duplicated_nodes_optimizer import MergeDuplicatedNodesOptimizer +from .transpose_optimizer import TransposeOptimizer +from .loop_optimizer import LoopOptimizer +from .back_to_back_optimizer import BackToBackOptimizer +from .upsample_optimizer import UpsampleOptimizer +from .const_dequantize_optimizer import ConstDequantizeOptimizer +from .. import logging + +# optimizer sequence need to be considered carefully +_optimizers = OrderedDict([ + ("optimize_transpose", TransposeOptimizer), + ("remove_redundant_upsample", UpsampleOptimizer), + ("fold_constants", ConstFoldOptimizer), + ("const_dequantize_optimizer", ConstDequantizeOptimizer), + ("loop_optimizer", LoopOptimizer), + # merge_duplication should be used after optimize_transpose + # for optimize_transpose may have some trans nodes that can be merge + ("merge_duplication", MergeDuplicatedNodesOptimizer), + ("remove_identity", IdentityOptimizer), + ("remove_back_to_back", BackToBackOptimizer), +]) + + +def _get_optimizers(): + return _optimizers + + +def optimize_graph(graph, catch_errors=True): + """ Optimize graph, return optimized graph. 
No throw if catch_errors is true""" + logger = logging.getLogger(__name__) + logger.info("Optimizing ONNX model") + + before = graph.dump_node_statistics() + opts = _get_optimizers() + continue_flag = True + while continue_flag: + continue_flag = False + for name, factory in opts.items(): + logger.verbose("Apply %s", name) + if catch_errors: + try: + current = copy.deepcopy(graph) + opt = factory() + graph = opt.optimize(current) or graph + continue_flag = continue_flag or opt.graph_been_opt + except Exception: # pylint: disable=broad-except + # if current optimizer fails, continue with other optimizers + logger.warning("Failed to apply %s", name, exc_info=1) + else: + opt = factory() + graph = opt.optimize(graph) + continue_flag = continue_flag or opt.graph_been_opt + + try: + graph.topological_sort(graph.get_nodes()) + except Exception: # pylint: disable=broad-except + logger.warning("Failed topological_sort", exc_info=1) + + after = graph.dump_node_statistics() + diff = copy.deepcopy(after) + diff.subtract(before) + diff = ["{} {} ({}->{})".format(k, str(v) if v < 0 else '+' + str(v), before.get(k, 0), after.get(k, 0)) + for k, v in sorted(diff.items()) if v != 0] + logger.info("After optimization: %s", ', '.join(diff) if diff else "no change") + + return graph diff --git a/lib/python3.10/site-packages/tf2onnx/optimizer/back_to_back_optimizer.py b/lib/python3.10/site-packages/tf2onnx/optimizer/back_to_back_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..ef4e74e0e7f6286f32bed0ab3764e2e761fface4 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/optimizer/back_to_back_optimizer.py @@ -0,0 +1,254 @@ +# SPDX-License-Identifier: Apache-2.0 + + +"""Back_To_Back Optimizer. + Collapse consecutive nodes into 1 node if possible. 
+""" + +from __future__ import unicode_literals + +import numpy as np +from tf2onnx.utils import ONNX_DTYPE_NAMES # lgtm[py/unsafe-cyclic-import] +from .optimizer_base import GraphOptimizerBase # lgtm[py/unsafe-cyclic-import] + +# pylint: disable=logging-not-lazy,unused-argument,missing-docstring,unused-variable,arguments-differ + +_func_map = {} + + +def _register_func(op_type): + def _internal_fun(func): + _func_map[op_type] = func + return func + + return _internal_fun + + +class BackToBackOptimizer(GraphOptimizerBase): + """Remove back-to-back nodes e.g. 'Cast' + """ + + def __init__(self): # pylint: disable=useless-super-delegation + super(BackToBackOptimizer, self).__init__() + + def _optimize(self, graph): + return self._apply_optimization(graph, self._optimize_at_current_graph_level) + + def _optimize_at_current_graph_level(self, g): + for optype, handler in _func_map.items(): + # candidate nodes for removal/optimization + nodes = [n for n in g.get_nodes() if n.type in optype] + + # topological sort of candidates + # simplifying assumption for back-to-back-optimizer is + # the op_types have 1 input, 1 output, but multiple consumers + has_dependencies = set() + consumer_node_ids = {n.output[0]: [] for n in nodes} + for n in nodes: + if n.input[0] in consumer_node_ids: + consumer_node_ids[n.input[0]].extend([n]) + has_dependencies.add(n.output[0]) + + # q = starting nodes with no dependencies + q = list(set(consumer_node_ids.keys()) - has_dependencies) + while q: + nodeid = q.pop(0) + node = g.get_node_by_output(nodeid, False) + consumer_nodes = consumer_node_ids[nodeid] + + if len(consumer_nodes) > 0: + all_consumers = g.find_output_consumers(node.output[0]) + if len(all_consumers) != len(consumer_nodes): + # if first node is used elsewhere, skip + continue + if set(node.output) & set(g.outputs): + # if this node is part of graph outputs, skip + continue + q2 = handler(g, node, consumer_nodes) + # add more nodes which can now be processed + q.extend(q2) + 
return g + + @staticmethod + @_register_func("Cast") + def _optimize_cast(g, node, consumer_nodes): + """remove long chains of cast ops""" + q2 = [] + type1 = node.get_attr('to').i + type1_name = ONNX_DTYPE_NAMES[type1] if type1 in ONNX_DTYPE_NAMES else '' + + # if parent node is cast node, and same type, delete this one + pnode = node.inputs[0] + if pnode.type == 'Cast': + type2 = pnode.get_attr('to').i + if type1 == type2: + for node2 in consumer_nodes: + g.replace_input(node2, node2.input[0], node.input[0], 0) + q2.append(node2.output[0]) + g.remove_node(node.name) + return q2 + + # otherwise, check consumer cast nodes for a target type + # that contains more information than current type + can_reduce = True + for node2 in consumer_nodes: + type2 = node2.get_attr('to').i + type2_name = ONNX_DTYPE_NAMES[type2] if type2 in ONNX_DTYPE_NAMES else '' + + if 'float' in type1_name or type1_name == 'double': + # high information type. ok to eliminate + pass + elif 'int' in type1_name: + # int* and uint* are mix of high and low information. 
+ # for safety, keep the current node, unless type2 is bool, + # in which case it's ok to remove node + if type1 != type2 and type2_name != 'bool': + can_reduce = False + elif type1_name == 'bool': + # bool is low information, so don't eliminate + if type1 != type2: + can_reduce = False + elif type1_name == 'string': + # can always remove string + pass + else: + # some odd type, keep node + can_reduce = False + q2.append(node2.output[0]) + + if can_reduce: + for node2 in consumer_nodes: + g.replace_input(node2, node2.input[0], node.input[0], 0) + g.remove_node(node.name) + return q2 + + @staticmethod + @_register_func("Transpose") + def _optimize_transpose(g, node, consumer_nodes): + """remove long chains of transpose ops""" + t1 = list(node.get_attr('perm').ints) + q2 = [] + for node2 in consumer_nodes: + g.replace_input(node2, node2.input[0], node.input[0], 0) + t2 = list(node2.get_attr('perm').ints) + new_perm = [t1[i] for i in t2] + # check if node2 can be removed. otherwise only update + if new_perm == list(range(len(t2))): + # both nodes can be deleted + shape = g.get_shape(node2.output[0]) + dtype = g.get_dtype(node2.output[0]) + node2_consumers = g.find_output_consumers(node2.output[0]) + g.replace_all_inputs(node2.output[0], node.input[0], ops=node2_consumers) + g.remove_node(node2.name) + if set(node2.output) & set(g.outputs): + g.make_node("Identity", [node.input[0]], + outputs=node2.output, shapes=[shape], dtypes=[dtype]) + else: + node2.set_attr('perm', [t1[i] for i in t2]) + q2.append(node2.output[0]) + g.remove_node(node.name) + return q2 + + @staticmethod + @_register_func(('Squeeze', 'Unsqueeze')) + def _optimize_squeeze_unsqueeze(g, node, consumer_nodes): + """remove pairs of squeeze-unsqueeze nodes""" + if node.type != 'Squeeze' or len(consumer_nodes) != 1: + # no need to return any value, since not removing long chain of nodes + return [] + + node2 = consumer_nodes[0] + if node2.type != 'Unsqueeze': + return [] + + axes_match = False + if 
g.opset <= 12 and node.get_attr('axes').ints == node2.get_attr('axes').ints: + axes_match = True + + # In opset 13, axes is an input. Optional for squeeze op. + if g.opset >= 13 and len(node.input) == 2: + if node.input[1] == node2.input[1]: + axes_match = True + elif node.inputs[1].is_const() and node2.inputs[1].is_const() and \ + node.inputs[1].get_tensor_value(as_list=True) == node2.inputs[1].get_tensor_value(as_list=True): + axes_match = True + + # if squeeze followed by unsqueeze is on diff axes, skip + if not axes_match: + return [] + + # if unsqueeze output is graph output, skip + if set(node2.output) & set(g.outputs): + return [] + + node2_consumers = g.find_output_consumers(node2.output[0]) + g.replace_all_inputs(node2.output[0], node.input[0], ops=node2_consumers) + g.remove_node(node.name) + g.remove_node(node2.name) + return [] + + @staticmethod + @_register_func(('Conv', 'BatchNormalization')) + def _optimize_conv_batchnorm_fusion(g, node, consumer_nodes): + """fuse conv and batchnorm""" + if node.type != 'Conv' or len(consumer_nodes) != 1: + # can only fuse 1 conv + batchnorm + return [] + + node2 = consumer_nodes[0] + if node2.type != 'BatchNormalization': + return [] + + # if batchnorm is a graph output, skip + if set(node2.output) & set(g.outputs): + return [] + + if not node.inputs[1].is_const(): + return [] + weights = node.inputs[1].get_tensor_value(as_list=False) + # if not 4D, NCHW skip + if len(weights.shape) != 4: + return [] + + # optional bias value + if len(node.inputs) > 2: + if not node.inputs[2].is_const(): + return [] + bias = node.inputs[2].get_tensor_value(as_list=False) + else: + bias = np.array(0, dtype=weights.dtype) + + # scale, offset, mean, var be const, otherwise skip + if False in [node2.inputs[i].is_const() for i in [1, 2, 3, 4]]: + return [] + + # if bn outputs used elsewhere, cannot fuse + for i in range(1, len(node2.output)): + if g.find_output_consumers(node2.output[i]): + return [] + + weights = weights.transpose(2, 3, 
1, 0) + scale = node2.inputs[1].get_tensor_value(as_list=False) + offset = node2.inputs[2].get_tensor_value(as_list=False) + mean = node2.inputs[3].get_tensor_value(as_list=False) + var = node2.inputs[4].get_tensor_value(as_list=False) + epsilon = node2.get_attr('epsilon').f + + scale_new = scale / np.sqrt(var + epsilon) + weights_new = weights * scale_new + weights_new = weights_new.transpose(3, 2, 0, 1) + bias_new = (bias - mean) * scale_new + offset + bias_new_const = g.make_const(node.name + '_bias_fused_bn', bias_new.astype(bias.dtype)) + weights_new_const = g.make_const(node.name + '_weights_fused_bn', weights_new.astype(weights.dtype)) + g.replace_inputs(node, [node.input[0], weights_new_const.output[0], bias_new_const.output[0]]) + + # fuse conv and bn, delete bn + node2_output = node2.output[:1] + node2_shape = g.get_shape(node2.output[0]) + node2_dtype = g.get_dtype(node2.output[0]) + g.remove_node(node2.name) + # the setter makes a copy + node.output = node2_output + g.set_shape(node2_output[0], node2_shape) + g.set_dtype(node2_output[0], node2_dtype) + return [] diff --git a/lib/python3.10/site-packages/tf2onnx/optimizer/const_dequantize_optimizer.py b/lib/python3.10/site-packages/tf2onnx/optimizer/const_dequantize_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..1d7554baaab94680e90d59682271def919cfea8e --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/optimizer/const_dequantize_optimizer.py @@ -0,0 +1,112 @@ +# SPDX-License-Identifier: Apache-2.0 + + +"""const dequantize Optimizer. 
"""const dequantize Optimizer.
   if a dequantize op's inputs are const we may be able to fold it through the next op
"""

from .optimizer_base import GraphOptimizerBase
from .const_fold_optimizer import ConstFoldOptimizer

# pylint: disable=logging-not-lazy,unused-argument,missing-docstring


class ConstDequantizeOptimizer(GraphOptimizerBase):
    """Push constant DequantizeLinear ops through shape-only ops (Transpose/Reshape/Unsqueeze)."""

    def __init__(self):  # pylint: disable=useless-super-delegation
        super(ConstDequantizeOptimizer, self).__init__()

    def _optimize(self, graph):
        return self._apply_optimization(graph, self._optimize_at_current_graph_level)

    def _optimize_at_current_graph_level(self, graph):
        # Iterate to a fixed point: one fold can enable another.
        graph_changed = True
        while graph_changed:
            graph_changed = False
            ops = graph.get_nodes()
            for op in ops:
                if self._fold_node(op, graph):
                    graph_changed = True
                    self.graph_been_opt = True
        return graph

    def _fold_node(self, node, graph):
        """ if a dequantize op's inputs are const and it is fed into a tensor reshaping op, we can apply the op
            directly to the quantized inputs. Returns True if the graph is changed.
        """
        if node.type not in ["Transpose", "Reshape", "Unsqueeze"]:
            return False
        dequant_node = node.inputs[0]
        if dequant_node.type != "DequantizeLinear":
            return False
        # dequantize output must feed only this node, or moving it would change others
        if len(graph.find_output_consumers(dequant_node.output[0])) > 1:
            return False
        if not self._all_inputs_are_const(node.inputs[1:]) or self._is_graph_output(node, graph):
            return False
        if not self._all_inputs_are_const(dequant_node.inputs):
            return False
        if len(dequant_node.inputs[1].get_tensor_value(as_list=False).flatten()) != 1:
            # If using per-channel quantization, we must compute the new axis
            old_axis = dequant_node.get_attr_value("axis")
            input_shape = dequant_node.inputs[0].get_tensor_value(as_list=False).shape
            new_axis = self.compute_new_axis(node, graph, old_axis, input_shape)
            if new_axis is None:
                return False
            dequant_node.set_attr("axis", new_axis)
        # rewire: node now reads the quantized const directly, fold it,
        # then make the dequantize node emit the folded (still quantized) data
        graph.replace_input(node, node.input[0], dequant_node.input[0], 0)
        const_outputs = ConstFoldOptimizer.compute_const_folding(node, graph)
        graph.replace_all_inputs(node.output[0], dequant_node.output[0])
        graph.remove_node(node.name)
        dequant_const = dequant_node.inputs[0]
        # copy the const if it is shared, so other consumers keep the old value
        if len(graph.find_output_consumers(dequant_const.output[0])) > 1:
            dequant_const = graph.copy_const(dequant_const)
            graph.replace_input(dequant_node, dequant_node.input[0], dequant_const.output[0], 0)
        dequant_const.set_tensor_value(const_outputs[0])
        return True

    @staticmethod
    def _all_inputs_are_const(nodes):
        # implicit inputs may be None; those are ignored
        return all(node.is_const() for node in nodes if node)

    @staticmethod
    def _is_graph_output(node, graph):
        node_out_set = set(node.output)
        graph_out_set = set(graph.outputs)
        return node_out_set.intersection(graph_out_set)

    @staticmethod
    def compute_new_axis(node, graph, old_axis, input_shape):
        """Map the per-channel quantization axis through a shape-only op.

        Returns the axis index after `node` is applied, or None when the axis
        cannot be tracked (the fold is then abandoned).
        """
        if old_axis < 0:
            old_axis += len(input_shape)
        if node.type == "Transpose":
            perm = node.get_attr_value("perm")
            if perm is None:
                return None
            return perm.index(old_axis)
        if node.type == "Reshape":
            # find an output dim that splits the element count at the same
            # point as old_axis and has the same extent
            prod = 1
            for d in input_shape[:old_axis + 1]:
                prod *= d
            new_shape = node.inputs[1].get_tensor_value(as_list=True)
            new_prod = 1
            for i, d in enumerate(new_shape):
                new_prod *= d
                if new_prod == prod:
                    if new_shape[i] == input_shape[old_axis]:
                        return i
                    return None
            return None
        if node.type == "Unsqueeze":
            if graph.opset >= 13:
                axes = node.inputs[1].get_tensor_value(as_list=True)
            else:
                axes = node.get_attr_value("axes")
            new_rank = len(input_shape) + len(axes)
            axes = [axis if axis >= 0 else axis + new_rank for axis in axes]
            # old_axis maps to the old_axis-th output position not taken by a new 1-dim
            for i in range(new_rank):
                if i not in axes:
                    if old_axis == 0:
                        return i
                    old_axis -= 1
            return None
        return None
_func_map = {}


def _register_func(op_type):
    # Decorator factory: registers the decorated fold function under op_type.
    def _internal_fun(func):
        _func_map[op_type] = func
        return func

    return _internal_fun


class ConstFoldOptimizer(GraphOptimizerBase):
    """Statically evaluate ops whose inputs are all constants."""

    def __init__(self):  # pylint: disable=useless-super-delegation
        super(ConstFoldOptimizer, self).__init__()

    def _optimize(self, graph):
        return self._apply_optimization(graph, self._optimize_at_current_graph_level)

    def _optimize_at_current_graph_level(self, graph):
        # Iterate to a fixed point: folding one node can make another foldable.
        graph_changed = True
        while graph_changed:
            graph_changed = False
            ops = graph.get_nodes()
            for op in ops:
                if self._should_skip(op):
                    continue
                if self._fold_node(op, graph):
                    graph_changed = True
                    self.graph_been_opt = True
        return graph

    @staticmethod
    def _should_skip(node):
        # only support onnx official op for now, op in other domain is not supported for now
        if not utils.is_onnx_domain(node.domain):
            return True

        if node.is_const() or node.is_graph_input():
            return True

        # DequantizeLinear is left to ConstDequantizeOptimizer
        skip_type = ["Identity", "DequantizeLinear"]
        if node.type in skip_type:
            return True

        return False

    def _fold_node(self, node, graph):
        """ if node's input are all const and it's not graph's output then it can be fold.
            if node can be fold True will be return indicating that graph is changed
        """
        if self._all_inputs_are_const(node.inputs) and not self._is_graph_output(node, graph):
            process_func = _func_map.get(node.type, None)
            if process_func:
                const_outputs = process_func(node, graph)
                self._replace_node_with_const(node, graph, const_outputs)
                return True
            self.logger.debug("need to add function to fold op %s whose op_type is %s", node.name, node.type)
        return False

    @staticmethod
    def compute_const_folding(node, graph):
        # Public entry used by ConstDequantizeOptimizer; raises KeyError for
        # op types without a registered fold function.
        return _func_map[node.type](node, graph)

    @staticmethod
    def _all_inputs_are_const(nodes):
        # implicit inputs may be None; those are ignored
        return all(node.is_const() for node in nodes if node)

    @staticmethod
    def _is_graph_output(node, graph):
        node_out_set = set(node.output)
        graph_out_set = set(graph.outputs)
        return node_out_set.intersection(graph_out_set)

    @staticmethod
    def _replace_node_with_const(node, graph, vals):
        """Swap `node` for one Const per output, rewiring all consumers."""
        utils.make_sure(len(node.output) == len(vals), "length of node outputs and const vals should be same")
        for old_input, val in zip(node.output, vals):
            const_node = graph.make_const(utils.make_name("const_fold_opt"), val)
            graph.set_dtype(const_node.output[0], utils.map_numpy_to_onnx_dtype(val.dtype))
            graph.set_shape(const_node.output[0], val.shape)
            graph.replace_all_inputs(old_input, const_node.output[0])  # ops=graph.get_nodes()
        graph.remove_node(node.name)

    @staticmethod
    @_register_func("Cast")
    def _fold_cast(node, graph):
        const_val = node.inputs[0].get_tensor_value(as_list=False)
        np_dtype = utils.ONNX_TO_NUMPY_DTYPE[node.get_attr("to").i]
        const_val_after_cast = const_val.astype(np_dtype)
        return [const_val_after_cast]

    @staticmethod
    @_register_func("Transpose")
    def _fold_transpose(node, graph) -> list:
        const_val = node.inputs[0].get_tensor_value(as_list=False)
        perm_attr = node.get_attr("perm")
        # a missing perm means numpy's default (reverse the axes), matching ONNX
        perm = perm_attr.ints if perm_attr else None
        const_val_after_trans = const_val.transpose(perm)
        return [const_val_after_trans]

    @staticmethod
    @_register_func("Reshape")
    def _fold_reshape(node, graph):
        const_val_data = node.inputs[0].get_tensor_value(as_list=False)
        const_val_shape = node.inputs[1].get_tensor_value(as_list=True)
        data_shape = const_val_data.shape
        for i, dim in enumerate(const_val_shape):
            if dim == 0:
                # In ORT a dim of 0 means the shape stays the same.
                const_val_shape[i] = data_shape[i]
        const_val_after_trans = const_val_data.reshape(const_val_shape)
        return [const_val_after_trans]

    @staticmethod
    @_register_func("Unsqueeze")
    def _fold_unsqueeze(node, graph):
        """
        numpy expand_dims only supports to unsqueeze one dim one time, so reshape is used to simplify the logic
        """
        const_val = node.inputs[0].get_tensor_value(as_list=False)
        if graph.opset >= 13:
            # opset >= 13: axes is an input instead of an attribute
            axes = node.inputs[1].get_tensor_value(as_list=True)
        else:
            axes = list(node.get_attr("axes").ints)
        shape_in = const_val.shape
        dims_out = len(shape_in) + len(axes)
        # normalize negative axes against the output rank
        axes = [i if i >= 0 else i + dims_out for i in axes]
        # calculate the shape of output according to onnx Unsqueeze's spec
        # https://github.com/onnx/onnx/blob/master/docs/Operators.md#Unsqueeze
        shape_in = iter(shape_in)
        shape_out = [None] * dims_out
        for ind in axes:
            shape_out[ind] = 1
        for ind, val in enumerate(shape_out):
            if val is None:
                shape_out[ind] = next(shape_in)

        const_val_after_unsqueeze = const_val.reshape(shape_out)
        return [const_val_after_unsqueeze]
"""Identity Optimizer.
   Remove useless Identity node in graphs including subgraphs, but does not hurt model output names.
"""

from __future__ import unicode_literals

from .optimizer_base import GraphOptimizerBase


# pylint: disable=logging-not-lazy,unused-argument,missing-docstring,unused-variable,arguments-differ


class IdentityOptimizer(GraphOptimizerBase):
    """Identity Optimizer."""

    def __init__(self):  # pylint: disable=useless-super-delegation
        super(IdentityOptimizer, self).__init__()

    def _optimize(self, graph):
        # Apply to the graph and, via the base helper, to every subgraph.
        return self._apply_optimization(graph, self._optimize_at_current_graph_level)

    def _optimize_at_current_graph_level(self, g):
        """Sweep the graph repeatedly, removing Identity nodes where safe."""
        keep_going = True
        while keep_going:
            keep_going = False
            candidates = [candidate for candidate in g.get_nodes() if candidate.type == "Identity"]
            for identity in candidates:
                if identity.graph is None:
                    self.logger.debug("node has been removed from this graph, skip")
                    continue

                outputs_in_graph = set(identity.output).intersection(g.outputs)
                if outputs_in_graph:
                    changed = self._handle_graph_output_identity(g, identity, outputs_in_graph)
                else:
                    changed = self._handle_non_graph_output_identity(g, identity)
                # mirrors the original control flow: the result for the last
                # candidate of the pass decides whether another pass runs
                keep_going = changed
                if changed:
                    self.graph_been_opt = True
        return g

    @staticmethod
    def _handle_non_graph_output_identity(graph, identity):
        # Not a graph output: simply splice the Identity out by pointing
        # every consumer of its output at its input.
        removed_name = identity.output[0]
        passthrough_name = identity.input[0]
        graph.replace_all_inputs(removed_name, passthrough_name, ops=graph.get_nodes())
        graph.remove_node(identity.name)
        return True

    def _handle_graph_output_identity(self, graph, identity, graph_outputs):
        """Remove an Identity that produces a graph output, preserving the output name.

        The producer upstream of the Identity is renamed to emit the graph
        output directly.  Returns False when removal is unsafe.
        """
        upstream_name = identity.input[0]
        upstream_node = identity.inputs[0]

        if upstream_node.graph != graph:
            # If input node is in parent graph, we don't handle it now
            self.logger.debug("input node in parent graph, skip")
            return False

        if upstream_node.is_graph_input():
            # Identity between input and output should not be removed.
            self.logger.debug("skip identity between input and output")
            return False

        out_name = identity.output[0]
        out_shape = graph.get_shape(out_name)
        out_dtype = graph.get_dtype(out_name)
        if upstream_name in graph.outputs:
            # input id already be graph output, so we cannot make that be another graph output.
            # this Identity must be kept.
            self.logger.debug("identity input already be graph output")
            return False

        graph.remove_node(identity.name)
        renamed_outputs = []
        for candidate in upstream_node.output:
            renamed_outputs.append(out_name if candidate == upstream_name else candidate)
        upstream_node.output = renamed_outputs

        graph.set_shape(out_name, out_shape)
        graph.set_dtype(out_name, out_dtype)

        graph.replace_all_inputs(upstream_name, out_name, ops=graph.get_nodes())
        return True
"""Loop Optimizer.
   some op in loop's body graph can be moved out to the loop
"""

from tf2onnx.utils import make_name, make_sure
from .optimizer_base import GraphOptimizerBase


# pylint: disable=logging-not-lazy,unused-argument,missing-docstring,unused-variable,arguments-differ


class LoopOptimizer(GraphOptimizerBase):
    """Loop Optimizer."""

    # a lot of terms used here come from loop's onnx spec
    # https://github.com/onnx/onnx/blob/master/docs/Operators.md#Loop
    def __init__(self):  # pylint: disable=useless-super-delegation
        super(LoopOptimizer, self).__init__()

    def _optimize(self, graph):
        return self._apply_optimization(graph, self._optimize_at_current_graph_level)

    def _optimize_at_current_graph_level(self, g):
        # Iterate to a fixed point: moving one transpose may expose another.
        has_update = True
        while has_update:
            has_update = False
            nodes = [n for n in g.get_nodes() if n.type == "Loop"]
            for n in nodes:
                has_update_tmp = self._try_move_transpose_out_of_body_graph(n)
                if has_update_tmp:
                    has_update = True
                    self.graph_been_opt = True
        return g

    @staticmethod
    def consumer_nodes_num(graph, node):
        # number of nodes consuming this node's (single) output
        make_sure(len(node.output) == 1, "only consider node with only one output")
        res = len(graph.find_output_consumers(node.output[0]))
        return res

    def _try_move_transpose_out_of_body_graph(self, loop_node):
        # output node of body graph can be loop-carried-dependent, if so it can't be move out of the body graph
        # return True if moving some nodes successfully
        # for now, we only consider moving transpose
        body_graph = loop_node.get_body_graphs()["body"]
        parent_graph = loop_node.graph
        scan_nodes_name_in_body, scan_node_in_parent = self._scan_outputs(loop_node)
        scan_nodes = [body_graph.get_node_by_output(name) for name in scan_nodes_name_in_body]
        graph_is_changed = False
        for node, name_in_parent in zip(scan_nodes, scan_node_in_parent):
            # 1 delete node in body graph if possible
            # only consider two case: trans is output, or transpose > identity > output
            need_process = False
            if node.type == "Transpose" and self.consumer_nodes_num(body_graph, node) <= 1:
                trans = node
                new_output = node.input[0]
                body_graph.remove_node(node.name)
                need_process = True
            elif node.type == "Identity" and node.inputs[0].type == "Transpose" \
                    and self.consumer_nodes_num(body_graph, node) <= 1 \
                    and self.consumer_nodes_num(body_graph, node.inputs[0]) <= 1:
                trans = node.inputs[0]
                new_output = node.inputs[0].input[0]
                body_graph.remove_node(node.inputs[0].name)
                body_graph.remove_node(node.name)
                need_process = True

            if need_process:
                # 2 correct body graph's output
                body_outputs = body_graph.outputs
                body_outputs[body_outputs.index(node.output[0])] = new_output
                # 3 insert new node in parent graph
                ori_perm = list(trans.get_attr("perm").ints)
                new_perm = [0] + [i + 1 for i in ori_perm]  # body output's rank is m > rank of loop's output is m+1
                name = make_name("trans_moved_from_loop_body")
                _ = parent_graph.insert_new_node_on_output("Transpose", name_in_parent, name, perm=new_perm)
                graph_is_changed = True

        return graph_is_changed

    @classmethod
    def _scan_outputs(cls, loop):
        """Return (scan output names in the body graph, scan output names on the Loop node)."""
        # loop has 2+N inputs; loop has N+K outputs;
        # loop's body graph has 1+N+K outputs
        loop_carried = len(loop.input) - 2
        body_graph = loop.get_body_graphs()["body"]
        return body_graph.outputs[loop_carried + 1:], loop.output[loop_carried:]
"""Merge Duplicated Nodes Optimizer.
   Remove duplicate nodes except identity nodes which should be handled by identity optimizer.
   for example, node a is input of node b and node c, and computation of node b, c are same such as "abs" op.
   then b and c can be merged into one node to avoid duplicated computation
"""

from collections import defaultdict

import numpy as np

from .optimizer_base import GraphOptimizerBase

# pylint: disable=logging-not-lazy,unused-argument,missing-docstring


class MergeDuplicatedNodesOptimizer(GraphOptimizerBase):
    """Remove duplicate nodes.
    """

    def __init__(self):
        super(MergeDuplicatedNodesOptimizer, self).__init__()
        # used internally
        self._graph_can_be_optimized = True

    def _optimize(self, graph):
        return self._apply_optimization(graph, self._optimize_at_current_graph_level)

    def _optimize_at_current_graph_level(self, graph):
        # Iterate to a fixed point: merging two nodes can make their
        # consumers' inputs equal, exposing further duplicates.
        while self._graph_can_be_optimized:
            self._graph_can_be_optimized = False
            self._merge_duplicated_nodes(graph)
            if self._graph_can_be_optimized:
                self.graph_been_opt = True
        return graph

    def _merge_duplicated_nodes(self, graph):
        # "duplicated" means: op_type, input and attribute are same
        # while attr is un-hashable so doesn't include it when grouping nodes
        # we do hash the tensor data of const values
        nodes_groups = self._group_nodes_by_type_inputs(graph)
        for _, nodes_group in nodes_groups.items():
            if self._skip_node_type(nodes_group[0]):
                continue
            self._del_nodes_if_duplicated(nodes_group, graph)

    @staticmethod
    def _group_nodes_by_type_inputs(graph):
        """Group nodes by (op type, input names, const-data hash) as a cheap pre-filter."""
        res = defaultdict(list)
        for node in graph.get_nodes():
            # default const of graph input cannot be merged
            if node.is_graph_input_default_const():
                continue
            tensor_data_hash = None
            if node.is_const():
                # Many constants have the same size so this is helpful
                tensor_data_hash = hash(node.attr['value'].t.raw_data)
            res[(node.type, tuple(node.input), tensor_data_hash)].append(node)
        return res

    def _del_nodes_if_duplicated(self, nodes_group, graph):
        # input and op type of nodes in same group are same,
        # and if their attributes are also same then they are duplicated
        while len(nodes_group) > 1:
            unprocessed_node = []
            nodes_to_process = [nodes_group[0]]
            for node in nodes_group[1:]:
                if self._have_equal_attr(node, nodes_to_process[0], graph):
                    nodes_to_process.append(node)
                else:
                    unprocessed_node.append(node)

            self._merge_nodes_that_are_duplicated(nodes_to_process, graph)
            # nodes not equal to the current representative form the next round
            nodes_group = unprocessed_node

    def _have_equal_attr(self, node_1, node_2, graph):
        """True when the two nodes' attributes (or const payloads) are equal."""
        if node_1.attr == node_2.attr:
            return True
        # consts have a name attr that can differ among equal consts so they must be handled separately
        if node_1.is_const() and node_2.is_const():
            # get_tensor_value is costly so that we check their shape first
            shape_1 = graph.get_shape(node_1.output[0])
            shape_2 = graph.get_shape(node_2.output[0])
            if shape_1 is not None and shape_2 is not None and \
                    shape_1 != shape_2:
                return False
            const_1 = node_1.get_tensor_value(as_list=False)
            const_2 = node_2.get_tensor_value(as_list=False)
            if const_1.dtype == const_2.dtype and \
                    np.array_equal(const_1, const_2):
                return True
        return False

    def _merge_nodes_that_are_duplicated(self, nodes_to_process, graph):
        # node's output may not all be used, so have to select the one that uses most of node's outputs
        nodes_to_process.sort(key=self._len_of_node_output, reverse=True)
        node_to_retain = nodes_to_process[0]
        for node_to_delete in nodes_to_process[1:]:
            # if one of the output is graph's output then it can't be deleted
            if set(node_to_delete.output).intersection(set(graph.outputs)):
                continue
            for old_input, new_input in zip(node_to_delete.output, node_to_retain.output):
                graph.replace_all_inputs(old_input, new_input)
            graph.remove_node(node_to_delete.name)
            self._graph_can_be_optimized = True

    @staticmethod
    def _skip_node_type(node):
        # identity node will be handled by identity optimizer so skip it
        if node.type in ["Identity"]:
            return True
        if node.is_graph_input():
            return True
        return False

    @staticmethod
    def _len_of_node_output(node):
        return len(node.output)


# ==== file: tf2onnx/optimizer/optimizer_base.py ====
# SPDX-License-Identifier: Apache-2.0


"""Graph Optimizer Base"""

from __future__ import unicode_literals

import copy

from .. import logging, utils


class GraphOptimizerBase(object):
    """optimizer graph to improve performance
    """

    def __init__(self):
        self._logger = logging.getLogger('.'.join(__name__.split('.')[:-1] + [self.__class__.__name__]))
        # set by subclasses when their pass changed the graph
        self._graph_been_opt = False

    @property
    def logger(self):
        return self._logger

    @property
    def is_debug_mode(self):
        return utils.is_debug_mode()

    @property
    def graph_been_opt(self):
        return self._graph_been_opt

    @graph_been_opt.setter
    def graph_been_opt(self, value):
        self._graph_been_opt = value

    def optimize(self, graph):
        """ Optimize graph, return optimized graph. """
        before = graph.dump_node_statistics()

        graph = self._optimize(graph)
        graph.update_proto()
        graph.delete_unused_nodes(graph.outputs)

        after = graph.dump_node_statistics()
        self._print_stat_diff(before, after)
        return graph

    def _optimize(self, graph):
        """ Derived class should override this function. """
        raise NotImplementedError

    @staticmethod
    def _apply_optimization(graph, optimize_func):
        """
        optimize graph
        will also optimize graph of nodes'
        Args:
            graph: the top level graph to be optimized
            optimize_func: function to optimize graph
        """
        graph = optimize_func(graph)
        for node in graph.get_nodes():
            body_graphs = node.get_body_graphs()
            if body_graphs:
                for attr, b_g in body_graphs.items():
                    # recurse into every body graph and store the result back
                    b_g = GraphOptimizerBase._apply_optimization(b_g, optimize_func)
                    node.set_body_graph_as_attr(attr, b_g)
        return graph

    def _print_stat_diff(self, before, after):
        """Log the per-op-type node-count change between two statistics Counters."""
        diff = copy.deepcopy(after)
        diff.subtract(before)
        diff = ["{} {} ({}->{})".format(k, str(v) if v < 0 else '+' + str(v), before.get(k, 0), after.get(k, 0))
                for k, v in sorted(diff.items()) if v != 0]
        self.logger.verbose(', '.join(diff) if diff else "no change")
import utils +from .optimizer_base import GraphOptimizerBase + + +# pylint: disable=logging-not-lazy,unused-argument,missing-docstring,abstract-method +# FIXME: +# pylint: disable=unused-variable + +def is_nhwc_transpose(transpose_node): + perm_attr = transpose_node.get_attr('perm') + return transpose_node.type == "Transpose" and perm_attr and perm_attr.ints in [NCHW_TO_NHWC, NCDHW_TO_NDHWC] + + +def is_nchw_transpose(transpose_node): + perm_attr = transpose_node.get_attr('perm') + return transpose_node.type == "Transpose" and perm_attr and perm_attr.ints in [NHWC_TO_NCHW, NDHWC_TO_NCDHW] + + +def is_useless_transpose(transpose_node): + perm_attr = transpose_node.get_attr('perm') + return transpose_node.type == "Transpose" and perm_attr and perm_attr.ints == list(range(len(perm_attr.ints))) + + +def get_transpose_rank(trans): + return len(trans.get_attr('perm').ints) + + +class TransposeOptimizer(GraphOptimizerBase): + """Transpose Optimizer.""" + + def __init__(self): + super(TransposeOptimizer, self).__init__() + + self._handler_map = {} + self._force_stop = {} + + self._initialize_handlers() + self._g = None + self._output_names = None + + @property + def nodes(self): + return self._g.get_nodes() + + def pre_optimize_action(self): + # make Reshape into a const, which then can be fused into Conv's weight for mobilenet_v1_75_192 + self._output_names = [self._g.get_node_by_output(out).name for out in self._g.outputs] + ops = self.nodes + constable_reshape_ops = [n for n in ops + if (n.type == "Reshape" + and n.inputs[0].is_const() + and n.inputs[1].is_const())] + for reshape_op in constable_reshape_ops: + target_t = reshape_op.inputs[0].get_tensor_value(as_list=False) + target_shape = reshape_op.inputs[1].get_tensor_value(as_list=True) + for i, dim in enumerate(target_shape): + if dim == 0: + # In ORT a dim of 0 means the shape stays the same. 
+ target_shape[i] = target_t.shape[i] + new_data = np.reshape(target_t, target_shape) + const_name = reshape_op.output[0] + self._g.remove_node(reshape_op.name) + self._g.make_const(const_name, new_data) + + # point all children nodes inputs to the new node + for output_name in reshape_op.output: + for child in ops: + for i, name in enumerate(child.input): + if name == output_name: + child.input[i] = const_name + + self._g.topological_sort(self._g.get_nodes()) + + def post_optimize_action(self): + def _calculate_new_shape(graph, op): + input_shape = graph.get_shape(op.input[0]) + if input_shape.count(-1) <= 1: + if is_nchw_transpose(op): + new_shape = [input_shape[0], input_shape[-1]] + input_shape[1:-1] + else: + new_shape = [input_shape[0]] + input_shape[2:] + [input_shape[1]] + return graph.make_const(utils.make_name("new_shape"), np.array(new_shape, dtype=np.int64)).output[0] + + # reshape requires tha output shape can only contain one -1, if not some extra op needed. + input_shape = graph.make_node("Shape", [op.input[0]]).output[0] + indice = graph.make_const(utils.make_name("indice"), np.array(op.get_attr('perm').ints)).output[0] + + return graph.make_node("Gather", [input_shape, indice]).output[0] + + nodes = self.nodes + # if channel==1 or height==width==1, replace transpose with reshape + # replacing trans with reshape is because transpose will copy data even if this transpose doesn't nothing + need_sort = False + for op in nodes: + if op.type == "Transpose": + input_shape = self._g.get_shape(op.input[0]) + if not input_shape: + continue + + if (is_nchw_transpose(op) and (input_shape[-1] == 1 or (np.all(np.array(input_shape[1:-1]) == 1)))) \ + or (is_nhwc_transpose(op) and (input_shape[1] == 1 or (np.all(np.array(input_shape[2:]) == 1)))): + new_shape = _calculate_new_shape(self._g, op) + # replace transpose with reshape + self._g.remove_node(op.name) + self._g.make_node("Reshape", [op.input[0], new_shape], name=op.name, outputs=op.output) + need_sort = 
True + if need_sort: + self._g.topological_sort(self._g.get_nodes()) + + def merge_duplicated_transposes(self): + # strategy used in previous procedure is to move transpose nodes down if possible, + # and it means that when a node has n outputs then n transpose will be generated, + # so we should merge them back to one if they can't be eliminated in previous procedure. + graph = self._g + input_transposes_map = defaultdict(list) + for node in graph.get_nodes(): + if node.type == "Transpose" and node.get_attr("perm"): + key = (node.input[0], str(node.get_attr("perm").ints)) + input_transposes_map[key].append(node) + + for transposes in input_transposes_map.values(): + # merge transpose nodes into one: make nodes use the output of the first transpose node + transpose_out = transposes[0].output[0] + for node in transposes[1:]: + old_transpose_out = node.output[0] + graph.replace_all_inputs(old_transpose_out, transpose_out) # ops=graph.get_nodes() + + # dangling transpose nodes can be deleted + graph.delete_unused_nodes(graph.outputs) + + def _optimize(self, graph): + return self._apply_optimization(graph, self._optimize_at_current_graph_level) + + def _optimize_at_current_graph_level(self, graph): + self._g = graph + self.pre_optimize_action() + no_action = False + iteration_cnt = 0 + while not no_action: + no_action = True + nodes = self.nodes + self._force_stop = {} + for n in nodes: + if is_nhwc_transpose(n): + if self._handle_nhwc_tranpose(n): + no_action = False + self.graph_been_opt = True + iteration_cnt += 1 + # need break, because handler may change nodes set, making the n stale object + # referencing already deleted elements + break + + if is_useless_transpose(n): + no_action = False + iteration_cnt += 1 + self._remove_useless_tranpose(n) + break + # for debugging purpose + if "stop" in self._force_stop and self._force_stop["stop"] == 1: + break + + self.logger.debug("finish after " + str(iteration_cnt) + " iteration(s)") + + 
self.merge_duplicated_transposes() + self.post_optimize_action() + return self._g + + def _initialize_handlers(self): + self._handler_map = { + "Add": self._add_handler, + "ArgMax": self._arg_min_max_handler, + "ArgMin": self._arg_min_max_handler, + "Cast": self._simple_through_handler, + "Clip": self._simple_through_handler, + "Concat": self._concat_handler, + "Elu": self._simple_through_handler, + "Exp": self._simple_through_handler, + "Identity": self._identity_handler, + "LeakyRelu": self._simple_through_handler, + "Log": self._simple_through_handler, + "Max": self._maxmin_handler, + "Min": self._maxmin_handler, + "Mul": self._mul_handler, + "Pad": self._pad_handler, + "Reciprocal": self._simple_through_handler, + "ReduceLogSum": self._reduce_handler, + "ReduceLogSumExp": self._reduce_handler, + "ReduceMax": self._reduce_handler, + "ReduceMean": self._reduce_handler, + "ReduceMin": self._reduce_handler, + "ReduceProd": self._reduce_handler, + "ReduceSum": self._reducesum_handler, + "ReduceSumSquare": self._reduce_handler, + "Relu": self._simple_through_handler, + "Shape": self._shape_handler, + "Sigmoid": self._simple_through_handler, + "Sum": self._sum_handler, + "Slice": self._slice_handler, + "Split": self._split_handler, + "Softplus": self._simple_through_handler, + "Sqrt": self._simple_through_handler, + "Squeeze": self._squeeze_handler, + "Sub": self._sub_handler, + "Tanh": self._simple_through_handler, + "Transpose": self._transpose_handler, + "DequantizeLinear": self._quantize_handler, + "QuantizeLinear": self._quantize_handler, + } + + def _handle_node_having_branches(self, trans, node): + trans_rank = get_transpose_rank(trans) + # create transpose pairs if some input are not. 
+ if not self._create_transpose_pairs_before_node(trans_rank, node): + return False + # make sure node's all input transpose all have only 1 consumer node, + # otherwise, it would impact their other output nodes + if self._nodes_has_single_consumer_node(node.inputs) and len(node.output) == 1: + self._create_transpose_pairs_after_node(trans_rank, node) + input_transposes = set(node.inputs) + for n in input_transposes: + n_input = n.input[0] + utils.make_sure(len(n.output) == 1, "only expect single output") + self._g.replace_all_inputs(n.output[0], n_input) # ops=self._g.get_nodes() + self._g.remove_node(n.name) + + utils.make_sure(len(node.output) == 1, "only expect single output") + # currently we assume node only has 1 output, for cases where it is more than 1 for example Split + # we need consider the fact that Split's multiple output will not always has data in NCHW/NHWC, + # it might be a different shape. + output_transposes = self._g.find_output_consumers(node.output[0]) + for n in output_transposes: + n_input = n.input[0] + utils.make_sure(len(n.output) == 1, "only expect single output") + self._g.replace_all_inputs(n.output[0], n_input) # ops=self._g.get_nodes() + self._g.remove_node(n.name) + + shape = self._g.get_shape(node.output[0]) + if shape: + # only nhwc transpose can reach here + perm = NHWC_TO_NCHW if trans_rank == 4 else NDHWC_TO_NCDHW + new_shape = [shape[i] for i in perm] + self._g.set_shape(node.output[0], new_shape) + return True + + self.logger.debug("input transpose does not have single consumer, skipping...") + return False + + # get the input index of transpose op in node's inputs. 
+ def _get_input_index_for_trans(self, node, trans): + input_index = 0 + for i in node.input: + if i == trans.output[0]: + break + input_index += 1 + return input_index + + # the assumption is: both node and trans have only 1 output + def _switch_transpose_and_node(self, node, trans, update_shape=True): + if not self._nodes_has_single_consumer_node([trans]): + return False + + input_index = self._get_input_index_for_trans(node, trans) + + self._g.replace_all_inputs(node.output[0], trans.output[0]) # ops=self._g.get_nodes() + self._g.replace_input(node, node.input[input_index], trans.input[0], input_index) + self._g.replace_input(trans, trans.input[0], node.output[0], 0) + + # need to transpose node shape in backward direction as well after switch + # otherwise, reshape added in post_optimize_action may not work correctly + shape = self._g.get_shape(node.output[0]) + if update_shape and shape: + # only nhwc transpose can reach here + new_shape = [shape[i] for i in NHWC_TO_NCHW] + self._g.set_shape(node.output[0], new_shape) + return True + + # if return value is True, then it means Transpose is handled as designed + # otherwise, it means that we skip handling since it is not in our support set + def _handle_nhwc_tranpose(self, trans): + if trans.output[0] in self._g.outputs: + self.logger.debug("%s connects to graph outputs, skip", trans.output[0]) + return False + out_nodes = self._g.find_output_consumers(trans.output[0]) + if len(out_nodes) == 1: + p = out_nodes[0] + if p.name in self._output_names: + self.logger.debug("cannot move transpose down since it met output node %s", p.name) + return False + + if p.type in self._handler_map: + op_handler = self._handler_map[p.type] + return op_handler(trans, p) + return False + if out_nodes: + # move transpose into branches to let Transposes can be "handled" in each branch + for n in out_nodes: + branch_trans = n.graph.make_node("Transpose", [trans.input[0]], attr=trans.get_onnx_attrs()) + n.graph.replace_input(n, 
trans.output[0], branch_trans.output[0]) + self._g.remove_node(trans.name) + return False + + def _remove_useless_tranpose(self, trans): + self._g.replace_all_inputs(trans.output[0], trans.input[0]) # ops=self._g.get_nodes() + self._g.remove_node(trans.name) + + def _nodes_has_single_consumer_node(self, nodes): + for n in nodes: + for output in n.output: + cnt = len(set(self._g.find_output_consumers(output))) + if cnt != 1: + return False + return True + + def _get_non_nchw_transpose_output_nodes(self, node): + # we just support node having 1 output, we need consider cases where node has more than 1 outputs + assert len(node.output) == 1 + non_nchw_tranpose_nodes = [] + consumers = self._g.find_output_consumers(node.output[0]) + for o in consumers: + if not is_nchw_transpose(o) and o not in non_nchw_tranpose_nodes: + non_nchw_tranpose_nodes.append(o) + return non_nchw_tranpose_nodes + + def _create_transpose_pairs_after_node(self, trans_rank, node): + assert len(node.output) == 1 # just support node who has 1 output + non_nchw_trans_consumers = self._get_non_nchw_transpose_output_nodes(node) + # add Transpose(0, 3, 1, 2) and Transpose(0, 2, 3, 1) before each non_nchw_trans_consumers + for consumer in non_nchw_trans_consumers: + perms = (NHWC_TO_NCHW, NCHW_TO_NHWC) if trans_rank == 4 else (NDHWC_TO_NCDHW, NCDHW_TO_NDHWC) + nchw_node = self._g.make_node("Transpose", [node.output[0]], attr={"perm": perms[0]}) + nhwc_node = self._g.make_node("Transpose", [nchw_node.output[0]], attr={"perm": perms[1]}) + self._g.replace_input(consumer, node.output[0], nhwc_node.output[0]) + + def _create_transpose_pairs_before_node(self, trans_rank, node): + def shape_after_expand(ori_shape): + # according to broadcasting rule to expand shape to 4D while not tile the tensor here + # still count on the broadcasting op to tile the tensor + if ori_shape.count(-1) >= 2: + self.logger.warning("%s shape can contain one -1 at most, otherwise reshape op can't work", node.name) + return None + 
ori_rank = len(ori_shape) + new_shape = [1] * (trans_rank - ori_rank) + ori_shape + return new_shape + + non_nhwc_trans_inputs = [] + for input_id, n in zip(node.input, node.inputs): + if not is_nhwc_transpose(n): + # check in case node has two inputs coming from a same node output. + if [input_id, n] not in non_nhwc_trans_inputs: + non_nhwc_trans_inputs.append([input_id, n]) + + # add Transpose NHWC_TO_NCHW and Transpose NCHW_TO_NHWC before each non_nhwc_trans_consumers + shape_unknow = [input_id for input_id, _ in non_nhwc_trans_inputs if self._g.get_shape(input_id) is None] + if shape_unknow: + if self._g.opset <= 9: + msg = "%s 's shape is unknown, ConstantOfShape will be used which exists in version 9 or higher" \ + "while graph's opset version is %s" % (shape_unknow, self._g.opset) + self.logger.warning(msg) + return False + + for input_id, n in non_nhwc_trans_inputs: + shape = self._g.get_shape(input_id) + # if rank of n is not transpose rank, then we need to insert a reshape op before inserting a transpose + # for example shape of n is [x, y], then output shape of reshape will be [1, 1, x, y] or [1, 1, 1, x, y] + if shape is None: + const_4 = self._g.make_const(utils.make_name("const_4"), np.array([trans_rank], np.int64)).output[0] + tensor_1 = onnx.helper.make_tensor("value", onnx.TensorProto.INT64, [1], [1]) + shape_node = self._g.make_node("Shape", [input_id]).output[0] + rank_node = self._g.make_node("Shape", [shape_node]).output[0] + expand_rank = self._g.make_node("Sub", [const_4, rank_node]).output[0] + array_fill_1 = self._g.make_node("ConstantOfShape", [expand_rank], attr={"value": tensor_1}).output[0] + new_shape = self._g.make_node("Concat", [array_fill_1, shape_node], attr={"axis": 0}).output[0] + reshape = self._g.make_node("Reshape", [input_id, new_shape]).output[0] + input_of_new_trans = reshape + elif len(shape) == trans_rank: + input_of_new_trans = input_id + else: + shape = shape_after_expand(shape) + if shape is None: + return False + 
const = self._g.make_const(utils.make_name("reshape_shape"), np.array(shape, np.int64)).output[0] + reshape = self._g.make_node("Reshape", [input_id, const]).output[0] + input_of_new_trans = reshape + + perms = (NHWC_TO_NCHW, NCHW_TO_NHWC) if trans_rank == 4 else (NDHWC_TO_NCDHW, NCDHW_TO_NDHWC) + nchw_node = self._g.make_node("Transpose", [input_of_new_trans], attr={"perm": perms[0]}) + nhwc_node = self._g.make_node("Transpose", [nchw_node.output[0]], attr={"perm": perms[1]}) + self._g.replace_input(node, input_id, nhwc_node.output[0]) + return True + + def _add_handler(self, trans, node): + if node.inputs[1].is_const(): + t_p = trans.inputs[0] + if t_p.type in ("Conv", "ConvTranspose") and len(t_p.input) == 2: + # if Conv or ConvTranspose's bias input is not set, then we set, otherwise, we don't set + # todo: maybe we can add already set bias with the input??? try later + + if not self._nodes_has_single_consumer_node([t_p]): + self.logger.debug("Conv does not have single consumer, can not merge Conv and Add") + return self._handle_node_having_branches(trans, node) + + if not self._nodes_has_single_consumer_node([trans]): + self.logger.debug("input transpose does not have single consumer, skipping...") + return False + + target_node = node.inputs[1] + numpy_val = target_node.get_tensor_value(as_list=False) + # Optional 1D bias to be added to the convolution, has size of M + if len(numpy_val.shape) - numpy_val.shape.count(1) > 1: + self.logger.debug("Bias is not 1D, can not merge Conv and Add") + return self._handle_node_having_branches(trans, node) + + bias_size = max(numpy_val.shape) + size_m = t_p.inputs[1].output_shapes[0][0] + if bias_size != size_m: + self.logger.debug("Bias size is not M, can not merge Conv and Add") + return self._handle_node_having_branches(trans, node) + + target_val = numpy_val.reshape(bias_size) + target_node.set_tensor_value(target_val) + + conv_inputs = [t_p.input[0], t_p.input[1], node.input[1]] + conv_node = 
self._g.make_node(t_p.type, conv_inputs, attr=t_p.get_onnx_attrs()) + self._g.replace_input(trans, trans.input[0], utils.port_name(conv_node.name), 0) + self._g.replace_all_inputs(node.output[0], trans.output[0]) # ops=self._g.get_nodes() + self._g.remove_node(t_p.name) + self._g.remove_node(node.name) + return True + return self._handle_node_having_branches(trans, node) + + def _transpose_handler(self, trans, node): + if is_nchw_transpose(node): + for g in {self._g, node.graph}: + g.replace_all_inputs(node.output[0], trans.input[0]) # ops=g.get_nodes() + + shape = node.graph.get_shape(node.output[0]) + dtype = node.graph.get_dtype(node.output[0]) + if node.output[0] in node.graph.outputs: + node.graph.make_node("Identity", [trans.input[0]], + outputs=node.output, shapes=[shape], dtypes=[dtype]) + self._g.remove_node(trans.name) + node.graph.remove_node(node.name) + return True + return False + + def _maxmin_handler(self, trans, node): + return self._handle_node_having_branches(trans, node) + + def _mul_handler(self, trans, node): + multiplier_input_id = None + multiplier_input_node = None + multiplier_input_idx = None + for idx, (input_id, input_node) in enumerate(zip(node.input, node.inputs)): + if input_id != trans.output[0]: + multiplier_input_id = input_id + multiplier_input_node = input_node + multiplier_input_idx = idx + + # node's inputs may come from one same node. 
if so the multiplier_input_node may be none + if multiplier_input_node is None: + if not self._nodes_has_single_consumer_node([trans]): + return False + self._g.replace_all_inputs(node.output[0], trans.output[0]) + self._g.replace_input(node, node.input[0], trans.input[0], 0) + self._g.replace_input(node, node.input[1], trans.input[0], 1) + self._g.replace_input(trans, trans.input[0], node.output[0], 0) + return True + + # convert mul(trans(x), trans(y)) -> trans(mul(x, y)) + if multiplier_input_node.type == "Transpose": + if is_nhwc_transpose(multiplier_input_node): + if not self._nodes_has_single_consumer_node([multiplier_input_node]): + return False + input_index = self._get_input_index_for_trans(node, multiplier_input_node) + if not self._switch_transpose_and_node(node, trans): + return False + + self._g.replace_input(node, node.input[input_index], multiplier_input_node.input[0], input_index) + self._g.remove_node(multiplier_input_node.name) + return True + + # handle const multipliers + if not multiplier_input_node.is_const(): + return False + multiplier = multiplier_input_node.get_tensor_value(as_list=False) + + # todo: apply this block if we have model case multiplier_input_id==0, and verify that. 
+ if multiplier_input_id == node.input[1]: + t_p = trans.inputs[0] + trans_rank = get_transpose_rank(trans) + # make sure conv don't have bias set + if t_p.type == "Conv" and t_p.inputs[1].is_const() and len(t_p.input) == 2 and trans_rank == 4: + conv = t_p + numpy_val = conv.inputs[1].get_tensor_value(as_list=False) + transposed_val = np.transpose(numpy_val, (2, 3, 1, 0)) + mul_val = multiplier + result = np.multiply(transposed_val, mul_val) + conv.inputs[1].set_tensor_value(np.transpose(result, (3, 2, 0, 1))) + + self._g.replace_all_inputs(node.output[0], trans.output[0]) # ops=self._g.get_nodes() + self._g.remove_node(node.name) + return True + + # if the shape is (), we just move transpose after the mul + if not multiplier.shape: + return self._switch_transpose_and_node(node, trans) + + # if multiplier is 1-D + if len(multiplier.shape) == 1 and multiplier.shape[0] == 1: + # shape is (1) + return self._switch_transpose_and_node(node, trans) + + # if multiplier has shape (N,) or (1, N) or (1, 1, N) .... + if np.prod(multiplier.shape) == multiplier.shape[-1]: + if not self._nodes_has_single_consumer_node([multiplier_input_node]): + new_inp = self._g.copy_const(multiplier_input_node) + self._g.replace_input(node, multiplier_input_id, new_inp.output[0], multiplier_input_idx) + multiplier_input_node = new_inp + perm = list(trans.get_attr('perm').ints) + new_shape = np.ones(len(perm), dtype=np.int32) + new_shape[perm[-1]] = multiplier.shape[-1] + multiplier_input_node.set_tensor_value(multiplier.reshape(new_shape)) + return self._switch_transpose_and_node(node, trans) + + return False + + def _sum_handler(self, trans, node): + inputs = node.inputs + trans_shape = self._g.get_shape(trans.output[0]) + perm = list(trans.get_attr('perm').ints) + untrans_idx = [perm.index(i) for i in range(len(perm))] + + # check if sum(trans(x1), trans(x2), const(x3), ...) 
can be switched + for n in inputs: + if n.type not in ["Transpose", "Const"]: + return False + if not self._nodes_has_single_consumer_node([n]): + return False + if n.is_const(): + # if graph is valid, op shapes should be valid + # const is special case, in case of broadcasting + # ensure rank matches + n_shape = self._g.get_shape(n.output[0]) + if len(n_shape) != len(trans_shape): + return False + else: + if list(n.get_attr('perm').ints) != perm: + return False + + # switch to trans(sum(x1, x2, x3, ...)) + self._g.replace_all_inputs(node.output[0], trans.output[0]) # ops=self._g.get_nodes() + new_input = [n.output[0] if n.is_const() else n.input[0] for n in inputs] + self._g.replace_inputs(node, new_input) + self._g.replace_input(trans, trans.input[0], node.output[0], 0) + + # adjust shape if present + shape = self._g.get_shape(node.output[0]) + if shape: + self._g.set_shape(node.output[0], [shape[i] for i in untrans_idx]) + + # update constants, remove dangling transposes + for n in inputs: + if n.is_const(): + val = n.get_tensor_value(as_list=False) + new_val = np.transpose(val, untrans_idx) + n.set_tensor_value(new_val) + elif n.name != trans.name: + self._g.remove_node(n.name) + return True + + def _identity_handler(self, trans, node): + if node.output[0] in node.graph.outputs: + return False + for g in {self._g, node.graph}: + g.replace_all_inputs(node.output[0], trans.output[0]) # ops=g.get_nodes() + node.graph.remove_node(node.name) + return True + + def _concat_handler(self, trans, node): + if self._handle_node_having_branches(trans, node): + perm = trans.get_attr_value("perm") + axis = node.get_attr_value("axis", 0) + new_axis = perm[axis] + node.set_attr("axis", new_axis) + return True + return False + + def _split_handler(self, trans, node): + # Todo: need handle cases where Slit node has more than 1 outputs. 
+ if self._handle_node_having_branches(trans, node): + node.set_attr("axis", 1) + return True + return False + + def _squeeze_handler(self, trans, node): + trans_rank = get_transpose_rank(trans) + def _calculate_new_attr(ori_perm, ori_squeeze_axes): + ori_squeeze_axes = [i if i >= 0 else i + trans_rank for i in ori_squeeze_axes] + new_squeeze_axes = sorted([ori_perm[i] for i in ori_squeeze_axes]) + # calculate output shape after trans and squeeze + n = len(ori_perm) + input_shape = list(range(n)) + shape_after_trans = [input_shape[i] for i in ori_perm] + output_shape = [shape_after_trans[i] for i in range(n) if i not in ori_squeeze_axes] + # calculate new_perm + # after switch, the output shape should be same, using this condtion we can figure the new perm + shape_after_squeeze = [input_shape[i] for i in range(n) if i not in new_squeeze_axes] + new_perm = [shape_after_squeeze.index(i) for i in output_shape] + + return new_perm, new_squeeze_axes + + if not self._nodes_has_single_consumer_node([trans]): + return False + + axes = None + # in opset 13, axes is an input not attr + if node.get_attr("axes"): + axes = node.get_attr("axes").ints + if len(node.input) > 1 and node.inputs[1].is_const(): + axes = node.inputs[1].get_tensor_value(as_list=True) + + if axes is not None: + # switch tran and squeeze + # 1 switch + self._g.replace_all_inputs(node.output[0], trans.output[0]) # ops=self._g.get_nodes() + self._g.replace_input(node, node.input[0], trans.input[0], 0) + self._g.replace_input(trans, trans.input[0], node.output[0], 0) + # 2 correct attr of nodes + squeeze_axes = sorted(axes) + trans_perm = list(trans.get_attr("perm").ints) + new_perm, new_squeeze_axes = _calculate_new_attr(ori_perm=trans_perm, ori_squeeze_axes=squeeze_axes) + trans.set_attr("perm", new_perm) + if self._g.opset <= 12: + node.set_attr("axes", new_squeeze_axes) + else: + new_axes_np = np.array(new_squeeze_axes, dtype=np.int64) + new_axes_const = 
self._g.make_const(utils.make_name(node.inputs[1].name), new_axes_np) + self._g.replace_inputs(node, [node.input[0], new_axes_const.output[0]]) + # 3 set shape + squeeze_shape = self._g.get_shape(node.output[0]) + self._g.set_shape(trans.output[0], squeeze_shape) + input_shape = self._g.get_shape(node.input[0]) + if input_shape is not None: + new_squeeze_output_shape = [input_shape[i] for i in range(trans_rank) if i not in new_squeeze_axes] + else: + new_squeeze_output_shape = [-1] * trans_rank + self.logger.warning("%s's shape is unknown, which may interfere further optimization", node.input[0]) + self._g.set_shape(node.output[0], new_squeeze_output_shape) + return True + return False + + def _sub_handler(self, trans, node): + return self._handle_node_having_branches(trans, node) + + def _pad_handler(self, trans, node): + trans_rank = get_transpose_rank(trans) + # [N-start, H-start, W-start, C-start, N-end, H-end, W-end, C-end] + if self._g.opset < 11: + pads = node.get_attr('pads').ints # [x1_begin, x2_begin...x1_end, x2_end,...] 
+ # NHWC->NCHW + if trans_rank == 4: + new_pads = [pads[0], pads[3], pads[1], pads[2], pads[4], pads[7], pads[5], pads[6]] + else: + new_pads = [pads[0], pads[4], pads[1], pads[2], pads[3], pads[5], pads[9], pads[6], pads[7], pads[8]] + node.set_attr("pads", new_pads) + return self._switch_transpose_and_node(node, trans) + + input1 = node.inputs[1] + if input1.is_const(): + if input1.data_format in ["NHWC", "unkown"]: + if not self._nodes_has_single_consumer_node([input1]): + input1 = self._g.copy_const(input1) + self._g.replace_input(node, node.input[1], input1.output[0], 1) + pads = input1.get_tensor_value() + # NHWC->NCHW + if trans_rank == 4: + new_pads = np.array([pads[0], pads[3], pads[1], pads[2], + pads[4], pads[7], pads[5], pads[6]], dtype=np.int64) + else: + new_pads = np.array([pads[0], pads[4], pads[1], pads[2], pads[3], + pads[5], pads[9], pads[6], pads[7], pads[8]], dtype=np.int64) + input1.set_tensor_value(new_pads) + input1.data_format = "NCHW" + return self._switch_transpose_and_node(node, trans) + # when the second input is not a constant, let's shuffle it with Split followed by Concat + # there are examples of models, where this non-constant input + # gets constant folded anyway by a framework. 
+ split = self._g.make_node("Split", inputs=[node.input[1]], attr={}, output_count=trans_rank * 2) + pads = split.output + if trans_rank == 4: + new_pads = self._g.make_node("Concat", [pads[0], pads[3], pads[1], pads[2], + pads[4], pads[7], pads[5], pads[6]], + {'axis': 0}) + else: + new_pads = self._g.make_node("Concat", [pads[0], pads[4], pads[1], pads[2], pads[3], + pads[5], pads[9], pads[6], pads[7], pads[8]], + {'axis': 0}) + self._g.replace_input(node, node.input[1], new_pads.output[0], 1) + return self._switch_transpose_and_node(node, trans) + + def _arg_min_max_handler(self, trans, node): + axis = node.get_attr_value("axis", 0) + node.set_attr("axes", [axis]) + result = self._reduce_handler(trans, node) + new_axis = node.get_attr_value("axes")[0] + node.set_attr("axis", new_axis) + del node.attr["axes"] + return result + + def _reduce_handler(self, trans, node): + keepdims = node.get_attr_value("keepdims", 1) + trans_rank = get_transpose_rank(trans) + axes = node.get_attr_value("axes", list(range(trans_rank))) + perm = trans.get_attr("perm").ints + axes = [a + trans_rank if a < 0 else a for a in axes] + new_axes = [perm[a] for a in axes] + update_shape = keepdims == 1 + shape = self._g.get_shape(node.output[0]) + if not self._switch_transpose_and_node(node, trans, update_shape): + return False + node.set_attr("axes", new_axes) + if keepdims == 0: + remaining_axes = [] + j = 0 + for i in range(trans_rank): + if i in new_axes: + remaining_axes.append(None) + else: + remaining_axes.append(j) + j += 1 + new_perm = [remaining_axes[p] for p in perm if remaining_axes[p] is not None] + if shape: + new_shape = [shape[new_perm.index(i)] for i in range(len(new_perm))] + self._g.set_shape(node.output[0], new_shape) + trans.set_attr("perm", new_perm) + return True + + def _reducesum_handler(self, trans, node): + keepdims = node.get_attr("keepdims") + if self._g.opset <= 12: + return self._reduce_handler(trans, node) + if keepdims and keepdims.i == 0: + return False + if 
node.inputs[1].is_const(): + axes = node.inputs[1].get_tensor_value() + perm = trans.get_attr('perm').ints + axes = [perm[axes[i]] for i in range(len(axes))] + new_axes = np.array(axes, dtype=np.int64) + if self._nodes_has_single_consumer_node([node.inputs[1]]): + node.inputs[1].set_tensor_value(new_axes) + else: + new_axes_const = self._g.make_const( + utils.make_name(node.inputs[1].name), new_axes + ) + self._g.replace_input(node, node.input[1], new_axes_const.output[0], 1) + return self._switch_transpose_and_node(node, trans) + return False + + def _slice_handler(self, trans, node): + trans_rank = get_transpose_rank(trans) + axes = None + if self._g.opset < 10: + axes_values = node.get_attr("axes") + if not axes_values: + return False + axes = axes_values.ints + perm = NCHW_TO_NHWC if trans_rank == 4 else NCDHW_TO_NDHWC + new_axes = [perm[axes[i]] for i in range(len(axes))] + node.set_attr("axes", new_axes) + return self._switch_transpose_and_node(node, trans) + # in opset 10, axes is input instead of an attribute. 
+ if len(node.inputs) >= 4 and node.inputs[3].is_const(): + axes = node.inputs[3].get_tensor_value(as_list=False) + dtype = axes.dtype + axes = axes.tolist() + perm = NCHW_TO_NHWC if trans_rank == 4 else NCDHW_TO_NDHWC + axes = [perm[axes[i]] for i in range(len(axes))] + # axes node might be shared + new_axes = np.array(axes, dtype=dtype) + if self._nodes_has_single_consumer_node([node.inputs[3]]): + node.inputs[3].set_tensor_value(new_axes) + else: + new_axes_const = self._g.make_const( + utils.make_name(node.inputs[3].name), new_axes + ) + self._g.replace_input(node, node.input[3], new_axes_const.output[0], 3) + return self._switch_transpose_and_node(node, trans) + return False + + def _quantize_handler(self, trans, node): + # Used for QuantizeLinear and DequantizeLinear + if not self._switch_transpose_and_node(node, trans): + return False + if 'axis' in node.attr: + perm = trans.get_attr_value("perm") + axis = node.get_attr_value("axis") + new_axis = perm[axis] + node.set_attr("axis", new_axis) + return True + + def _simple_through_handler(self, trans, node): + return self._switch_transpose_and_node(node, trans) + + def _shape_handler(self, trans, node): + # input > trans > shape can be changed into input > shape > gather + if not self._nodes_has_single_consumer_node([trans]): + return False + + output_shape = self._g.get_shape(node.output[0]) + output_dtype = self._g.get_dtype(node.output[0]) + self._g.remove_node(trans.name) + self._g.remove_node(node.name) + shape_node = self._g.make_node("Shape", [trans.input[0]]) + const_node = self._g.make_const(utils.make_name("Const"), np.array(trans.get_attr("perm").ints)) + gather_node = self._g.make_node("Gather", [shape_node.output[0], const_node.output[0]], outputs=node.output) + self._g.set_shape(gather_node.output[0], output_shape) + self._g.set_dtype(gather_node.output[0], output_dtype) + return True diff --git a/lib/python3.10/site-packages/tf2onnx/optimizer/upsample_optimizer.py 
b/lib/python3.10/site-packages/tf2onnx/optimizer/upsample_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..2d676bfcab14b019e14a1281ac3f40a87a30172c --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/optimizer/upsample_optimizer.py @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: Apache-2.0 + +"""Resize Optimizer. + Replace resize operations with all ones in scale with Identity nodes +""" + +from __future__ import unicode_literals + +import numpy as np + +from .optimizer_base import GraphOptimizerBase + +# pylint: disable=logging-not-lazy,unused-argument,missing-docstring,unused-variable,arguments-differ + + +class UpsampleOptimizer(GraphOptimizerBase): + """Upsample Optimizer.""" + + def __init__(self): # pylint: disable=useless-super-delegation + super(UpsampleOptimizer, self).__init__() + self._g = None + + def _optimize(self, graph): + return self._apply_optimization( + graph, + self._optimize_at_current_graph_level) + + def _optimize_at_current_graph_level(self, graph): + self._g = graph + # replace upsample node with all ones in scale with identity node + for n in self._g.get_nodes(): + if n.type == "Upsample": + node_changed = False + # upsample in opset <=8 has scales in attributes + if self._g.opset <= 8: + scales = n.get_attr_value("scales") + if scales and all([float(s) == 1. 
for s in scales]): + n.type = "Identity" + node_changed = True + # upsample in opset >= 9 has scales in input[1] + if self._g.opset >= 9 and len(n.input) == 2: + scales_input = n.inputs[1] + + if scales_input.is_const() and \ + np.all(scales_input.get_tensor_value(as_list=False) == 1.): + n.type = "Identity" + n.input = [n.input[0]] + node_changed = True + if node_changed: + self.logger.debug("replacing " + n.name + + " with Identity operation ") + + return self._g diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/BroadcastToOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/BroadcastToOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..75cd28a0a7f01b12e3a0b153e51e0e9cf469a2ec --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/BroadcastToOptions.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class BroadcastToOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsBroadcastToOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = BroadcastToOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def BroadcastToOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # BroadcastToOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def BroadcastToOptionsStart(builder): builder.StartObject(0) +def BroadcastToOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/BuiltinOperator.py b/lib/python3.10/site-packages/tf2onnx/tflite/BuiltinOperator.py new file mode 100644 index 
0000000000000000000000000000000000000000..be6a658ecad18cbc39dad9edf4c4df3e7b0b4b9d --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/BuiltinOperator.py @@ -0,0 +1,140 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +class BuiltinOperator(object): + ADD = 0 + AVERAGE_POOL_2D = 1 + CONCATENATION = 2 + CONV_2D = 3 + DEPTHWISE_CONV_2D = 4 + DEPTH_TO_SPACE = 5 + DEQUANTIZE = 6 + EMBEDDING_LOOKUP = 7 + FLOOR = 8 + FULLY_CONNECTED = 9 + HASHTABLE_LOOKUP = 10 + L2_NORMALIZATION = 11 + L2_POOL_2D = 12 + LOCAL_RESPONSE_NORMALIZATION = 13 + LOGISTIC = 14 + LSH_PROJECTION = 15 + LSTM = 16 + MAX_POOL_2D = 17 + MUL = 18 + RELU = 19 + RELU_N1_TO_1 = 20 + RELU6 = 21 + RESHAPE = 22 + RESIZE_BILINEAR = 23 + RNN = 24 + SOFTMAX = 25 + SPACE_TO_DEPTH = 26 + SVDF = 27 + TANH = 28 + CONCAT_EMBEDDINGS = 29 + SKIP_GRAM = 30 + CALL = 31 + CUSTOM = 32 + EMBEDDING_LOOKUP_SPARSE = 33 + PAD = 34 + UNIDIRECTIONAL_SEQUENCE_RNN = 35 + GATHER = 36 + BATCH_TO_SPACE_ND = 37 + SPACE_TO_BATCH_ND = 38 + TRANSPOSE = 39 + MEAN = 40 + SUB = 41 + DIV = 42 + SQUEEZE = 43 + UNIDIRECTIONAL_SEQUENCE_LSTM = 44 + STRIDED_SLICE = 45 + BIDIRECTIONAL_SEQUENCE_RNN = 46 + EXP = 47 + TOPK_V2 = 48 + SPLIT = 49 + LOG_SOFTMAX = 50 + DELEGATE = 51 + BIDIRECTIONAL_SEQUENCE_LSTM = 52 + CAST = 53 + PRELU = 54 + MAXIMUM = 55 + ARG_MAX = 56 + MINIMUM = 57 + LESS = 58 + NEG = 59 + PADV2 = 60 + GREATER = 61 + GREATER_EQUAL = 62 + LESS_EQUAL = 63 + SELECT = 64 + SLICE = 65 + SIN = 66 + TRANSPOSE_CONV = 67 + SPARSE_TO_DENSE = 68 + TILE = 69 + EXPAND_DIMS = 70 + EQUAL = 71 + NOT_EQUAL = 72 + LOG = 73 + SUM = 74 + SQRT = 75 + RSQRT = 76 + SHAPE = 77 + POW = 78 + ARG_MIN = 79 + FAKE_QUANT = 80 + REDUCE_PROD = 81 + REDUCE_MAX = 82 + PACK = 83 + LOGICAL_OR = 84 + ONE_HOT = 85 + LOGICAL_AND = 86 + LOGICAL_NOT = 87 + UNPACK = 88 + REDUCE_MIN = 89 + FLOOR_DIV = 90 + REDUCE_ANY = 91 + SQUARE = 92 + ZEROS_LIKE = 93 + FILL = 94 + FLOOR_MOD 
= 95 + RANGE = 96 + RESIZE_NEAREST_NEIGHBOR = 97 + LEAKY_RELU = 98 + SQUARED_DIFFERENCE = 99 + MIRROR_PAD = 100 + ABS = 101 + SPLIT_V = 102 + UNIQUE = 103 + CEIL = 104 + REVERSE_V2 = 105 + ADD_N = 106 + GATHER_ND = 107 + COS = 108 + WHERE = 109 + RANK = 110 + ELU = 111 + REVERSE_SEQUENCE = 112 + MATRIX_DIAG = 113 + QUANTIZE = 114 + MATRIX_SET_DIAG = 115 + ROUND = 116 + HARD_SWISH = 117 + IF = 118 + WHILE = 119 + NON_MAX_SUPPRESSION_V4 = 120 + NON_MAX_SUPPRESSION_V5 = 121 + SCATTER_ND = 122 + SELECT_V2 = 123 + DENSIFY = 124 + SEGMENT_SUM = 125 + BATCH_MATMUL = 126 + PLACEHOLDER_FOR_GREATER_OP_CODES = 127 + CUMSUM = 128 + CALL_ONCE = 129 + BROADCAST_TO = 130 + RFFT2D = 131 + diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/BuiltinOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/BuiltinOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..e354e578efa61a33b7bde204dced602cab34725c --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/BuiltinOptions.py @@ -0,0 +1,114 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +class BuiltinOptions(object): + NONE = 0 + Conv2DOptions = 1 + DepthwiseConv2DOptions = 2 + ConcatEmbeddingsOptions = 3 + LSHProjectionOptions = 4 + Pool2DOptions = 5 + SVDFOptions = 6 + RNNOptions = 7 + FullyConnectedOptions = 8 + SoftmaxOptions = 9 + ConcatenationOptions = 10 + AddOptions = 11 + L2NormOptions = 12 + LocalResponseNormalizationOptions = 13 + LSTMOptions = 14 + ResizeBilinearOptions = 15 + CallOptions = 16 + ReshapeOptions = 17 + SkipGramOptions = 18 + SpaceToDepthOptions = 19 + EmbeddingLookupSparseOptions = 20 + MulOptions = 21 + PadOptions = 22 + GatherOptions = 23 + BatchToSpaceNDOptions = 24 + SpaceToBatchNDOptions = 25 + TransposeOptions = 26 + ReducerOptions = 27 + SubOptions = 28 + DivOptions = 29 + SqueezeOptions = 30 + SequenceRNNOptions = 31 + StridedSliceOptions = 32 + ExpOptions = 33 + 
TopKV2Options = 34 + SplitOptions = 35 + LogSoftmaxOptions = 36 + CastOptions = 37 + DequantizeOptions = 38 + MaximumMinimumOptions = 39 + ArgMaxOptions = 40 + LessOptions = 41 + NegOptions = 42 + PadV2Options = 43 + GreaterOptions = 44 + GreaterEqualOptions = 45 + LessEqualOptions = 46 + SelectOptions = 47 + SliceOptions = 48 + TransposeConvOptions = 49 + SparseToDenseOptions = 50 + TileOptions = 51 + ExpandDimsOptions = 52 + EqualOptions = 53 + NotEqualOptions = 54 + ShapeOptions = 55 + PowOptions = 56 + ArgMinOptions = 57 + FakeQuantOptions = 58 + PackOptions = 59 + LogicalOrOptions = 60 + OneHotOptions = 61 + LogicalAndOptions = 62 + LogicalNotOptions = 63 + UnpackOptions = 64 + FloorDivOptions = 65 + SquareOptions = 66 + ZerosLikeOptions = 67 + FillOptions = 68 + BidirectionalSequenceLSTMOptions = 69 + BidirectionalSequenceRNNOptions = 70 + UnidirectionalSequenceLSTMOptions = 71 + FloorModOptions = 72 + RangeOptions = 73 + ResizeNearestNeighborOptions = 74 + LeakyReluOptions = 75 + SquaredDifferenceOptions = 76 + MirrorPadOptions = 77 + AbsOptions = 78 + SplitVOptions = 79 + UniqueOptions = 80 + ReverseV2Options = 81 + AddNOptions = 82 + GatherNdOptions = 83 + CosOptions = 84 + WhereOptions = 85 + RankOptions = 86 + ReverseSequenceOptions = 87 + MatrixDiagOptions = 88 + QuantizeOptions = 89 + MatrixSetDiagOptions = 90 + HardSwishOptions = 91 + IfOptions = 92 + WhileOptions = 93 + DepthToSpaceOptions = 94 + NonMaxSuppressionV4Options = 95 + NonMaxSuppressionV5Options = 96 + ScatterNdOptions = 97 + SelectV2Options = 98 + DensifyOptions = 99 + SegmentSumOptions = 100 + BatchMatMulOptions = 101 + CumsumOptions = 102 + CallOnceOptions = 103 + BroadcastToOptions = 104 + Rfft2dOptions = 105 + diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/CallOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/CallOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..ee8f466a6d05c873a9fee587a47b7b09820aadf7 --- /dev/null +++ 
b/lib/python3.10/site-packages/tf2onnx/tflite/CallOptions.py @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class CallOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsCallOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = CallOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def CallOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # CallOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # CallOptions + def Subgraph(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos) + return 0 + +def CallOptionsStart(builder): builder.StartObject(1) +def CallOptionsAddSubgraph(builder, subgraph): builder.PrependUint32Slot(0, subgraph, 0) +def CallOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/ConcatEmbeddingsOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/ConcatEmbeddingsOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..d8424ca8694e1a723d0e6e31a77718316d281ca8 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/ConcatEmbeddingsOptions.py @@ -0,0 +1,96 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class ConcatEmbeddingsOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsConcatEmbeddingsOptions(cls, buf, offset): 
+ n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ConcatEmbeddingsOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def ConcatEmbeddingsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ConcatEmbeddingsOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ConcatEmbeddingsOptions + def NumChannels(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # ConcatEmbeddingsOptions + def NumColumnsPerChannel(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # ConcatEmbeddingsOptions + def NumColumnsPerChannelAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # ConcatEmbeddingsOptions + def NumColumnsPerChannelLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # ConcatEmbeddingsOptions + def NumColumnsPerChannelIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # ConcatEmbeddingsOptions + def EmbeddingDimPerChannel(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # ConcatEmbeddingsOptions + def EmbeddingDimPerChannelAsNumpy(self): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # ConcatEmbeddingsOptions + def EmbeddingDimPerChannelLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # ConcatEmbeddingsOptions + def EmbeddingDimPerChannelIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + +def ConcatEmbeddingsOptionsStart(builder): builder.StartObject(3) +def ConcatEmbeddingsOptionsAddNumChannels(builder, numChannels): builder.PrependInt32Slot(0, numChannels, 0) +def ConcatEmbeddingsOptionsAddNumColumnsPerChannel(builder, numColumnsPerChannel): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(numColumnsPerChannel), 0) +def ConcatEmbeddingsOptionsStartNumColumnsPerChannelVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def ConcatEmbeddingsOptionsAddEmbeddingDimPerChannel(builder, embeddingDimPerChannel): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(embeddingDimPerChannel), 0) +def ConcatEmbeddingsOptionsStartEmbeddingDimPerChannelVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def ConcatEmbeddingsOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/Conv2DOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/Conv2DOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..1ffcf8da14bd2e0043f9af984ccf7af655372a97 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/Conv2DOptions.py @@ -0,0 +1,78 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + 
+class Conv2DOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsConv2DOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Conv2DOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def Conv2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # Conv2DOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Conv2DOptions + def Padding(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # Conv2DOptions + def StrideW(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # Conv2DOptions + def StrideH(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # Conv2DOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # Conv2DOptions + def DilationWFactor(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 1 + + # Conv2DOptions + def DilationHFactor(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 1 + +def Conv2DOptionsStart(builder): builder.StartObject(6) +def Conv2DOptionsAddPadding(builder, padding): 
builder.PrependInt8Slot(0, padding, 0) +def Conv2DOptionsAddStrideW(builder, strideW): builder.PrependInt32Slot(1, strideW, 0) +def Conv2DOptionsAddStrideH(builder, strideH): builder.PrependInt32Slot(2, strideH, 0) +def Conv2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(3, fusedActivationFunction, 0) +def Conv2DOptionsAddDilationWFactor(builder, dilationWFactor): builder.PrependInt32Slot(4, dilationWFactor, 1) +def Conv2DOptionsAddDilationHFactor(builder, dilationHFactor): builder.PrependInt32Slot(5, dilationHFactor, 1) +def Conv2DOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/DensifyOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/DensifyOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..09eb98b984278de7284d34357c61c9f2bd227748 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/DensifyOptions.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class DensifyOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsDensifyOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = DensifyOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def DensifyOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # DensifyOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def DensifyOptionsStart(builder): builder.StartObject(0) +def DensifyOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/DimensionMetadata.py 
b/lib/python3.10/site-packages/tf2onnx/tflite/DimensionMetadata.py new file mode 100644 index 0000000000000000000000000000000000000000..68c9fd95c090f5c7fc61a27800ea722954ec3df1 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/DimensionMetadata.py @@ -0,0 +1,84 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class DimensionMetadata(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsDimensionMetadata(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = DimensionMetadata() + x.Init(buf, n + offset) + return x + + @classmethod + def DimensionMetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # DimensionMetadata + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # DimensionMetadata + def Format(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # DimensionMetadata + def DenseSize(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # DimensionMetadata + def ArraySegmentsType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) + return 0 + + # DimensionMetadata + def ArraySegments(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + from flatbuffers.table import Table + obj = Table(bytearray(), 0) + self._tab.Union(obj, o) + return obj + 
return None + + # DimensionMetadata + def ArrayIndicesType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) + return 0 + + # DimensionMetadata + def ArrayIndices(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + from flatbuffers.table import Table + obj = Table(bytearray(), 0) + self._tab.Union(obj, o) + return obj + return None + +def DimensionMetadataStart(builder): builder.StartObject(6) +def DimensionMetadataAddFormat(builder, format): builder.PrependInt8Slot(0, format, 0) +def DimensionMetadataAddDenseSize(builder, denseSize): builder.PrependInt32Slot(1, denseSize, 0) +def DimensionMetadataAddArraySegmentsType(builder, arraySegmentsType): builder.PrependUint8Slot(2, arraySegmentsType, 0) +def DimensionMetadataAddArraySegments(builder, arraySegments): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(arraySegments), 0) +def DimensionMetadataAddArrayIndicesType(builder, arrayIndicesType): builder.PrependUint8Slot(4, arrayIndicesType, 0) +def DimensionMetadataAddArrayIndices(builder, arrayIndices): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(arrayIndices), 0) +def DimensionMetadataEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/DimensionType.py b/lib/python3.10/site-packages/tf2onnx/tflite/DimensionType.py new file mode 100644 index 0000000000000000000000000000000000000000..50f94ce087e91f93ebd3832340f10ebf1e104afd --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/DimensionType.py @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +class DimensionType(object): + DENSE = 0 + SPARSE_CSR = 1 + diff --git 
a/lib/python3.10/site-packages/tf2onnx/tflite/ExpOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/ExpOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..740efed3e43c2b542215e81751f0ba0b53b12dcc --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/ExpOptions.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class ExpOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsExpOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ExpOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def ExpOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ExpOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def ExpOptionsStart(builder): builder.StartObject(0) +def ExpOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/FakeQuantOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/FakeQuantOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..549bd5d64f7d2cb084e27ae48868e03cbdbe1090 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/FakeQuantOptions.py @@ -0,0 +1,62 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class FakeQuantOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsFakeQuantOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = FakeQuantOptions() + 
x.Init(buf, n + offset) + return x + + @classmethod + def FakeQuantOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # FakeQuantOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # FakeQuantOptions + def Min(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + + # FakeQuantOptions + def Max(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos) + return 0.0 + + # FakeQuantOptions + def NumBits(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # FakeQuantOptions + def NarrowRange(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + +def FakeQuantOptionsStart(builder): builder.StartObject(4) +def FakeQuantOptionsAddMin(builder, min): builder.PrependFloat32Slot(0, min, 0.0) +def FakeQuantOptionsAddMax(builder, max): builder.PrependFloat32Slot(1, max, 0.0) +def FakeQuantOptionsAddNumBits(builder, numBits): builder.PrependInt32Slot(2, numBits, 0) +def FakeQuantOptionsAddNarrowRange(builder, narrowRange): builder.PrependBoolSlot(3, narrowRange, 0) +def FakeQuantOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/FullyConnectedOptionsWeightsFormat.py b/lib/python3.10/site-packages/tf2onnx/tflite/FullyConnectedOptionsWeightsFormat.py new file mode 100644 index 
0000000000000000000000000000000000000000..e88c4259d219e3593eff120fc93b8dd07650685c --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/FullyConnectedOptionsWeightsFormat.py @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +class FullyConnectedOptionsWeightsFormat(object): + DEFAULT = 0 + SHUFFLED4x16INT8 = 1 + diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/HardSwishOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/HardSwishOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..b6b45eaf0a1a0812d6e214e108db3497dc40d449 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/HardSwishOptions.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class HardSwishOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsHardSwishOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = HardSwishOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def HardSwishOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # HardSwishOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def HardSwishOptionsStart(builder): builder.StartObject(0) +def HardSwishOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/Int32Vector.py b/lib/python3.10/site-packages/tf2onnx/tflite/Int32Vector.py new file mode 100644 index 0000000000000000000000000000000000000000..55034f887c9c240c12561bf4cf344081dce3f51e --- /dev/null +++ 
b/lib/python3.10/site-packages/tf2onnx/tflite/Int32Vector.py @@ -0,0 +1,59 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class Int32Vector(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsInt32Vector(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Int32Vector() + x.Init(buf, n + offset) + return x + + @classmethod + def Int32VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # Int32Vector + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Int32Vector + def Values(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # Int32Vector + def ValuesAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # Int32Vector + def ValuesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Int32Vector + def ValuesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + +def Int32VectorStart(builder): builder.StartObject(1) +def Int32VectorAddValues(builder, values): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(values), 0) +def Int32VectorStartValuesVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def Int32VectorEnd(builder): return 
builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/MaximumMinimumOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/MaximumMinimumOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..adbf1134b3a597486e93b646585fb75264480407 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/MaximumMinimumOptions.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class MaximumMinimumOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsMaximumMinimumOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = MaximumMinimumOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def MaximumMinimumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # MaximumMinimumOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def MaximumMinimumOptionsStart(builder): builder.StartObject(0) +def MaximumMinimumOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/MirrorPadMode.py b/lib/python3.10/site-packages/tf2onnx/tflite/MirrorPadMode.py new file mode 100644 index 0000000000000000000000000000000000000000..40c9a74c57ad1369fcdd8543bcf2ddb752672596 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/MirrorPadMode.py @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +class MirrorPadMode(object): + REFLECT = 0 + SYMMETRIC = 1 + diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/NonMaxSuppressionV5Options.py 
b/lib/python3.10/site-packages/tf2onnx/tflite/NonMaxSuppressionV5Options.py new file mode 100644 index 0000000000000000000000000000000000000000..801a1f75a17beaa213c935d5e7adfc166878186e --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/NonMaxSuppressionV5Options.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class NonMaxSuppressionV5Options(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsNonMaxSuppressionV5Options(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = NonMaxSuppressionV5Options() + x.Init(buf, n + offset) + return x + + @classmethod + def NonMaxSuppressionV5OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # NonMaxSuppressionV5Options + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def NonMaxSuppressionV5OptionsStart(builder): builder.StartObject(0) +def NonMaxSuppressionV5OptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/NotEqualOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/NotEqualOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..bd704ba7cb1c68a577cbe4f872b7107485c5ae30 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/NotEqualOptions.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class NotEqualOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsNotEqualOptions(cls, buf, offset): + n = 
flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = NotEqualOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def NotEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # NotEqualOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def NotEqualOptionsStart(builder): builder.StartObject(0) +def NotEqualOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/OperatorCode.py b/lib/python3.10/site-packages/tf2onnx/tflite/OperatorCode.py new file mode 100644 index 0000000000000000000000000000000000000000..423f4f360053d53cd43d772d59f9ea093df48341 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/OperatorCode.py @@ -0,0 +1,62 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class OperatorCode(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsOperatorCode(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = OperatorCode() + x.Init(buf, n + offset) + return x + + @classmethod + def OperatorCodeBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # OperatorCode + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # OperatorCode + def DeprecatedBuiltinCode(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # OperatorCode + def CustomCode(self): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.String(o + self._tab.Pos) + return None + + # OperatorCode + def Version(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 1 + + # OperatorCode + def BuiltinCode(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def OperatorCodeStart(builder): builder.StartObject(4) +def OperatorCodeAddDeprecatedBuiltinCode(builder, deprecatedBuiltinCode): builder.PrependInt8Slot(0, deprecatedBuiltinCode, 0) +def OperatorCodeAddCustomCode(builder, customCode): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(customCode), 0) +def OperatorCodeAddVersion(builder, version): builder.PrependInt32Slot(2, version, 1) +def OperatorCodeAddBuiltinCode(builder, builtinCode): builder.PrependInt32Slot(3, builtinCode, 0) +def OperatorCodeEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/Padding.py b/lib/python3.10/site-packages/tf2onnx/tflite/Padding.py new file mode 100644 index 0000000000000000000000000000000000000000..0a12d88ab2668d230ef6dd203d32e620283cb753 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/Padding.py @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +class Padding(object): + SAME = 0 + VALID = 1 + diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/Pool2DOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/Pool2DOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..d0b662fbc10f4e473fe14a1acf18eaca401f0443 --- /dev/null +++ 
b/lib/python3.10/site-packages/tf2onnx/tflite/Pool2DOptions.py @@ -0,0 +1,78 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class Pool2DOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsPool2DOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Pool2DOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def Pool2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # Pool2DOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Pool2DOptions + def Padding(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # Pool2DOptions + def StrideW(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # Pool2DOptions + def StrideH(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # Pool2DOptions + def FilterWidth(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # Pool2DOptions + def FilterHeight(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # Pool2DOptions + def 
FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + +def Pool2DOptionsStart(builder): builder.StartObject(6) +def Pool2DOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0) +def Pool2DOptionsAddStrideW(builder, strideW): builder.PrependInt32Slot(1, strideW, 0) +def Pool2DOptionsAddStrideH(builder, strideH): builder.PrependInt32Slot(2, strideH, 0) +def Pool2DOptionsAddFilterWidth(builder, filterWidth): builder.PrependInt32Slot(3, filterWidth, 0) +def Pool2DOptionsAddFilterHeight(builder, filterHeight): builder.PrependInt32Slot(4, filterHeight, 0) +def Pool2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(5, fusedActivationFunction, 0) +def Pool2DOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/PowOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/PowOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..ccde99400ad1ecfc1dbad27f651736b7dc9a40aa --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/PowOptions.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class PowOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsPowOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = PowOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def PowOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # PowOptions + def Init(self, buf, pos): + self._tab = 
flatbuffers.table.Table(buf, pos) + +def PowOptionsStart(builder): builder.StartObject(0) +def PowOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/QuantizationParameters.py b/lib/python3.10/site-packages/tf2onnx/tflite/QuantizationParameters.py new file mode 100644 index 0000000000000000000000000000000000000000..2bcad60ef12311729ce28d6efa070aa83ee87287 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/QuantizationParameters.py @@ -0,0 +1,173 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class QuantizationParameters(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsQuantizationParameters(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = QuantizationParameters() + x.Init(buf, n + offset) + return x + + @classmethod + def QuantizationParametersBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # QuantizationParameters + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # QuantizationParameters + def Min(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # QuantizationParameters + def MinAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o) + return 0 + + # QuantizationParameters + def MinLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + 
return self._tab.VectorLen(o) + return 0 + + # QuantizationParameters + def MinIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # QuantizationParameters + def Max(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # QuantizationParameters + def MaxAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o) + return 0 + + # QuantizationParameters + def MaxLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # QuantizationParameters + def MaxIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # QuantizationParameters + def Scale(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Float32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # QuantizationParameters + def ScaleAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float32Flags, o) + return 0 + + # QuantizationParameters + def ScaleLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # QuantizationParameters + def ScaleIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + + # QuantizationParameters + def ZeroPoint(self, j): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8)) + return 0 + + # QuantizationParameters + def ZeroPointAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int64Flags, o) + return 0 + + # QuantizationParameters + def ZeroPointLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # QuantizationParameters + def ZeroPointIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + return o == 0 + + # QuantizationParameters + def DetailsType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) + return 0 + + # QuantizationParameters + def Details(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14)) + if o != 0: + from flatbuffers.table import Table + obj = Table(bytearray(), 0) + self._tab.Union(obj, o) + return obj + return None + + # QuantizationParameters + def QuantizedDimension(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def QuantizationParametersStart(builder): builder.StartObject(7) +def QuantizationParametersAddMin(builder, min): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(min), 0) +def QuantizationParametersStartMinVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def QuantizationParametersAddMax(builder, max): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(max), 
0) +def QuantizationParametersStartMaxVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def QuantizationParametersAddScale(builder, scale): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(scale), 0) +def QuantizationParametersStartScaleVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def QuantizationParametersAddZeroPoint(builder, zeroPoint): builder.PrependUOffsetTRelativeSlot(3, flatbuffers.number_types.UOffsetTFlags.py_type(zeroPoint), 0) +def QuantizationParametersStartZeroPointVector(builder, numElems): return builder.StartVector(8, numElems, 8) +def QuantizationParametersAddDetailsType(builder, detailsType): builder.PrependUint8Slot(4, detailsType, 0) +def QuantizationParametersAddDetails(builder, details): builder.PrependUOffsetTRelativeSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(details), 0) +def QuantizationParametersAddQuantizedDimension(builder, quantizedDimension): builder.PrependInt32Slot(6, quantizedDimension, 0) +def QuantizationParametersEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/ReshapeOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/ReshapeOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..bd63cffdce848317e15d09493681508b6e675d19 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/ReshapeOptions.py @@ -0,0 +1,59 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class ReshapeOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsReshapeOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ReshapeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def ReshapeOptionsBufferHasIdentifier(cls, buf, offset, 
size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ReshapeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ReshapeOptions + def NewShape(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # ReshapeOptions + def NewShapeAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # ReshapeOptions + def NewShapeLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # ReshapeOptions + def NewShapeIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + +def ReshapeOptionsStart(builder): builder.StartObject(1) +def ReshapeOptionsAddNewShape(builder, newShape): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(newShape), 0) +def ReshapeOptionsStartNewShapeVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def ReshapeOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/ResizeNearestNeighborOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/ResizeNearestNeighborOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..0c2dbabfdd866d4eca441eab5984c75d28029c8b --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/ResizeNearestNeighborOptions.py @@ -0,0 +1,46 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from 
flatbuffers.compat import import_numpy +np = import_numpy() + +class ResizeNearestNeighborOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsResizeNearestNeighborOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = ResizeNearestNeighborOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def ResizeNearestNeighborOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # ResizeNearestNeighborOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # ResizeNearestNeighborOptions + def AlignCorners(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # ResizeNearestNeighborOptions + def HalfPixelCenters(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + +def ResizeNearestNeighborOptionsStart(builder): builder.StartObject(2) +def ResizeNearestNeighborOptionsAddAlignCorners(builder, alignCorners): builder.PrependBoolSlot(0, alignCorners, 0) +def ResizeNearestNeighborOptionsAddHalfPixelCenters(builder, halfPixelCenters): builder.PrependBoolSlot(1, halfPixelCenters, 0) +def ResizeNearestNeighborOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/SelectV2Options.py b/lib/python3.10/site-packages/tf2onnx/tflite/SelectV2Options.py new file mode 100644 index 0000000000000000000000000000000000000000..616c3f0bfb7d47cbccd44857b1be49d070d158c7 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/SelectV2Options.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by 
the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class SelectV2Options(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsSelectV2Options(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SelectV2Options() + x.Init(buf, n + offset) + return x + + @classmethod + def SelectV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SelectV2Options + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def SelectV2OptionsStart(builder): builder.StartObject(0) +def SelectV2OptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/SequenceRNNOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/SequenceRNNOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..1300f9cbc9c43164edf84bfa095973c917121d41 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/SequenceRNNOptions.py @@ -0,0 +1,54 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class SequenceRNNOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsSequenceRNNOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SequenceRNNOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def SequenceRNNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SequenceRNNOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # 
SequenceRNNOptions + def TimeMajor(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + + # SequenceRNNOptions + def FusedActivationFunction(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # SequenceRNNOptions + def AsymmetricQuantizeInputs(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + +def SequenceRNNOptionsStart(builder): builder.StartObject(3) +def SequenceRNNOptionsAddTimeMajor(builder, timeMajor): builder.PrependBoolSlot(0, timeMajor, 0) +def SequenceRNNOptionsAddFusedActivationFunction(builder, fusedActivationFunction): builder.PrependInt8Slot(1, fusedActivationFunction, 0) +def SequenceRNNOptionsAddAsymmetricQuantizeInputs(builder, asymmetricQuantizeInputs): builder.PrependBoolSlot(2, asymmetricQuantizeInputs, 0) +def SequenceRNNOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/SparseToDenseOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/SparseToDenseOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..7b5ecc36794c93e7351e49b698224c0e227ab194 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/SparseToDenseOptions.py @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class SparseToDenseOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsSparseToDenseOptions(cls, buf, offset): + n = 
flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SparseToDenseOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def SparseToDenseOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # SparseToDenseOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SparseToDenseOptions + def ValidateIndices(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos)) + return False + +def SparseToDenseOptionsStart(builder): builder.StartObject(1) +def SparseToDenseOptionsAddValidateIndices(builder, validateIndices): builder.PrependBoolSlot(0, validateIndices, 0) +def SparseToDenseOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/SparsityParameters.py b/lib/python3.10/site-packages/tf2onnx/tflite/SparsityParameters.py new file mode 100644 index 0000000000000000000000000000000000000000..a233ec0741ee686c3ba05cde4bda758a8e7b8564 --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/SparsityParameters.py @@ -0,0 +1,115 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class SparsityParameters(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsSparsityParameters(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = SparsityParameters() + x.Init(buf, n + offset) + return x + + @classmethod + def SparsityParametersBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # 
SparsityParameters + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # SparsityParameters + def TraversalOrder(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # SparsityParameters + def TraversalOrderAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # SparsityParameters + def TraversalOrderLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # SparsityParameters + def TraversalOrderIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + # SparsityParameters + def BlockMap(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4)) + return 0 + + # SparsityParameters + def BlockMapAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # SparsityParameters + def BlockMapLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # SparsityParameters + def BlockMapIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + return o == 0 + + # SparsityParameters + def DimMetadata(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + x = self._tab.Vector(o) + x += 
flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from tf2onnx.tflite.DimensionMetadata import DimensionMetadata + obj = DimensionMetadata() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # SparsityParameters + def DimMetadataLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # SparsityParameters + def DimMetadataIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + return o == 0 + +def SparsityParametersStart(builder): builder.StartObject(3) +def SparsityParametersAddTraversalOrder(builder, traversalOrder): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(traversalOrder), 0) +def SparsityParametersStartTraversalOrderVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def SparsityParametersAddBlockMap(builder, blockMap): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(blockMap), 0) +def SparsityParametersStartBlockMapVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def SparsityParametersAddDimMetadata(builder, dimMetadata): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(dimMetadata), 0) +def SparsityParametersStartDimMetadataVector(builder, numElems): return builder.StartVector(4, numElems, 4) +def SparsityParametersEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/StridedSliceOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/StridedSliceOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..4d7a530d102c865b26eae3d43d70561027cf5eda --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/StridedSliceOptions.py @@ -0,0 +1,70 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# 
namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class StridedSliceOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsStridedSliceOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = StridedSliceOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def StridedSliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # StridedSliceOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # StridedSliceOptions + def BeginMask(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # StridedSliceOptions + def EndMask(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # StridedSliceOptions + def EllipsisMask(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # StridedSliceOptions + def NewAxisMask(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # StridedSliceOptions + def ShrinkAxisMask(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def StridedSliceOptionsStart(builder): builder.StartObject(5) +def StridedSliceOptionsAddBeginMask(builder, beginMask): builder.PrependInt32Slot(0, beginMask, 0) +def 
StridedSliceOptionsAddEndMask(builder, endMask): builder.PrependInt32Slot(1, endMask, 0) +def StridedSliceOptionsAddEllipsisMask(builder, ellipsisMask): builder.PrependInt32Slot(2, ellipsisMask, 0) +def StridedSliceOptionsAddNewAxisMask(builder, newAxisMask): builder.PrependInt32Slot(3, newAxisMask, 0) +def StridedSliceOptionsAddShrinkAxisMask(builder, shrinkAxisMask): builder.PrependInt32Slot(4, shrinkAxisMask, 0) +def StridedSliceOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/TransposeConvOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/TransposeConvOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..d3ff5accfad8f7a16e10b7842960a9bf04fa615a --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/TransposeConvOptions.py @@ -0,0 +1,54 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class TransposeConvOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsTransposeConvOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = TransposeConvOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def TransposeConvOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # TransposeConvOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # TransposeConvOptions + def Padding(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos) + return 0 + + # TransposeConvOptions + def StrideW(self): + o = 
flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + + # TransposeConvOptions + def StrideH(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos) + return 0 + +def TransposeConvOptionsStart(builder): builder.StartObject(3) +def TransposeConvOptionsAddPadding(builder, padding): builder.PrependInt8Slot(0, padding, 0) +def TransposeConvOptionsAddStrideW(builder, strideW): builder.PrependInt32Slot(1, strideW, 0) +def TransposeConvOptionsAddStrideH(builder, strideH): builder.PrependInt32Slot(2, strideH, 0) +def TransposeConvOptionsEnd(builder): return builder.EndObject() diff --git a/lib/python3.10/site-packages/tf2onnx/tflite/TransposeOptions.py b/lib/python3.10/site-packages/tf2onnx/tflite/TransposeOptions.py new file mode 100644 index 0000000000000000000000000000000000000000..034055c8df98225c8f904ba6d6a02806131b185f --- /dev/null +++ b/lib/python3.10/site-packages/tf2onnx/tflite/TransposeOptions.py @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: Apache-2.0 + +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: tflite + +import flatbuffers +from flatbuffers.compat import import_numpy +np = import_numpy() + +class TransposeOptions(object): + __slots__ = ['_tab'] + + @classmethod + def GetRootAsTransposeOptions(cls, buf, offset): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = TransposeOptions() + x.Init(buf, n + offset) + return x + + @classmethod + def TransposeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): + return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed) + + # TransposeOptions + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + +def TransposeOptionsStart(builder): 
builder.StartObject(0) +def TransposeOptionsEnd(builder): return builder.EndObject()