Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- lib/python3.10/site-packages/av/audio/fifo.cpython-310-x86_64-linux-gnu.so +3 -0
- lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/INSTALLER +1 -0
- lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/METADATA +55 -0
- lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/RECORD +30 -0
- lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/REQUESTED +0 -0
- lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/WHEEL +4 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudapy/recursion_usecases.py +100 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_boolean.py +24 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_cffi.py +33 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_debuginfo.py +221 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_exception.py +174 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_freevar.py +29 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_gufunc_scheduling.py +95 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_iterators.py +99 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_sm_creation.py +205 -0
- lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_userexc.py +47 -0
- lib/python3.10/site-packages/tf2onnx/custom_opsets/__init__.py +7 -0
- lib/python3.10/site-packages/tf2onnx/custom_opsets/ms.py +115 -0
- lib/python3.10/site-packages/tf2onnx/custom_opsets/onnx_ml.py +99 -0
- lib/python3.10/site-packages/tf2onnx/custom_opsets/string_ops.py +170 -0
- lib/python3.10/site-packages/tf2onnx/onnx_opset/__init__.py +19 -0
- lib/python3.10/site-packages/tf2onnx/onnx_opset/common.py +69 -0
- lib/python3.10/site-packages/tf2onnx/onnx_opset/controlflow.py +672 -0
- lib/python3.10/site-packages/tf2onnx/onnx_opset/generator.py +254 -0
- lib/python3.10/site-packages/tf2onnx/onnx_opset/logical.py +137 -0
- lib/python3.10/site-packages/tf2onnx/onnx_opset/math.py +740 -0
- lib/python3.10/site-packages/tf2onnx/onnx_opset/misc.py +48 -0
- lib/python3.10/site-packages/tf2onnx/onnx_opset/nn.py +1534 -0
- lib/python3.10/site-packages/tf2onnx/onnx_opset/quantize.py +89 -0
- lib/python3.10/site-packages/tf2onnx/onnx_opset/reduction.py +316 -0
- lib/python3.10/site-packages/tf2onnx/onnx_opset/rnn.py +263 -0
- lib/python3.10/site-packages/tf2onnx/onnx_opset/signal.py +312 -0
- lib/python3.10/site-packages/tf2onnx/onnx_opset/tensor.py +0 -0
- lib/python3.10/site-packages/tf2onnx/onnx_opset/traditionalml.py +14 -0
- lib/python3.10/site-packages/tf2onnx/optimizer/__init__.py +79 -0
- lib/python3.10/site-packages/tf2onnx/optimizer/back_to_back_optimizer.py +254 -0
- lib/python3.10/site-packages/tf2onnx/optimizer/const_dequantize_optimizer.py +112 -0
- lib/python3.10/site-packages/tf2onnx/optimizer/const_fold_optimizer.py +155 -0
- lib/python3.10/site-packages/tf2onnx/optimizer/identity_optimizer.py +85 -0
- lib/python3.10/site-packages/tf2onnx/optimizer/loop_optimizer.py +90 -0
- lib/python3.10/site-packages/tf2onnx/optimizer/merge_duplicated_nodes_optimizer.py +120 -0
- lib/python3.10/site-packages/tf2onnx/optimizer/optimizer_base.py +76 -0
- lib/python3.10/site-packages/tf2onnx/optimizer/transpose_optimizer.py +829 -0
- lib/python3.10/site-packages/tf2onnx/optimizer/upsample_optimizer.py +53 -0
- lib/python3.10/site-packages/tf2onnx/tflite/BroadcastToOptions.py +30 -0
- lib/python3.10/site-packages/tf2onnx/tflite/BuiltinOperator.py +140 -0
- lib/python3.10/site-packages/tf2onnx/tflite/BuiltinOptions.py +114 -0
- lib/python3.10/site-packages/tf2onnx/tflite/CallOptions.py +38 -0
- lib/python3.10/site-packages/tf2onnx/tflite/ConcatEmbeddingsOptions.py +96 -0
.gitattributes
CHANGED
|
@@ -101,3 +101,4 @@ lib/python3.10/site-packages/av/audio/layout.cpython-310-x86_64-linux-gnu.so fil
|
|
| 101 |
lib/python3.10/site-packages/av/audio/plane.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 102 |
lib/python3.10/site-packages/av/audio/stream.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 103 |
lib/python3.10/site-packages/av/audio/codeccontext.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 101 |
lib/python3.10/site-packages/av/audio/plane.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 102 |
lib/python3.10/site-packages/av/audio/stream.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 103 |
lib/python3.10/site-packages/av/audio/codeccontext.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 104 |
+
lib/python3.10/site-packages/av/audio/fifo.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
lib/python3.10/site-packages/av/audio/fifo.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:39e9bb1e44c14e5091b8a3e2f21841ca942edc677dc7ad5050dc6d98fdb15108
|
| 3 |
+
size 634401
|
lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
uv
|
lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/METADATA
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.3
|
| 2 |
+
Name: jsonschema-specifications
|
| 3 |
+
Version: 2024.10.1
|
| 4 |
+
Summary: The JSON Schema meta-schemas and vocabularies, exposed as a Registry
|
| 5 |
+
Project-URL: Documentation, https://jsonschema-specifications.readthedocs.io/
|
| 6 |
+
Project-URL: Homepage, https://github.com/python-jsonschema/jsonschema-specifications
|
| 7 |
+
Project-URL: Issues, https://github.com/python-jsonschema/jsonschema-specifications/issues/
|
| 8 |
+
Project-URL: Funding, https://github.com/sponsors/Julian
|
| 9 |
+
Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-jsonschema-specifications?utm_source=pypi-jsonschema-specifications&utm_medium=referral&utm_campaign=pypi-link
|
| 10 |
+
Project-URL: Source, https://github.com/python-jsonschema/jsonschema-specifications
|
| 11 |
+
Author-email: Julian Berman <Julian+jsonschema-specifications@GrayVines.com>
|
| 12 |
+
License-File: COPYING
|
| 13 |
+
Keywords: data validation,json,json schema,jsonschema,validation
|
| 14 |
+
Classifier: Development Status :: 5 - Production/Stable
|
| 15 |
+
Classifier: Intended Audience :: Developers
|
| 16 |
+
Classifier: License :: OSI Approved :: MIT License
|
| 17 |
+
Classifier: Operating System :: OS Independent
|
| 18 |
+
Classifier: Programming Language :: Python
|
| 19 |
+
Classifier: Programming Language :: Python :: 3.8
|
| 20 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 21 |
+
Classifier: Programming Language :: Python :: 3.10
|
| 22 |
+
Classifier: Programming Language :: Python :: 3.11
|
| 23 |
+
Classifier: Programming Language :: Python :: 3.12
|
| 24 |
+
Classifier: Programming Language :: Python :: 3.13
|
| 25 |
+
Classifier: Programming Language :: Python :: Implementation :: CPython
|
| 26 |
+
Classifier: Programming Language :: Python :: Implementation :: PyPy
|
| 27 |
+
Classifier: Topic :: File Formats :: JSON
|
| 28 |
+
Classifier: Topic :: File Formats :: JSON :: JSON Schema
|
| 29 |
+
Requires-Python: >=3.9
|
| 30 |
+
Requires-Dist: referencing>=0.31.0
|
| 31 |
+
Description-Content-Type: text/x-rst
|
| 32 |
+
|
| 33 |
+
=============================
|
| 34 |
+
``jsonschema-specifications``
|
| 35 |
+
=============================
|
| 36 |
+
|
| 37 |
+
|PyPI| |Pythons| |CI| |ReadTheDocs|
|
| 38 |
+
|
| 39 |
+
JSON support files from the `JSON Schema Specifications <https://json-schema.org/specification.html>`_ (metaschemas, vocabularies, etc.), packaged for runtime access from Python as a `referencing-based Schema Registry <https://referencing.readthedocs.io/en/stable/api/#referencing.Registry>`_.
|
| 40 |
+
|
| 41 |
+
.. |PyPI| image:: https://img.shields.io/pypi/v/jsonschema-specifications.svg
|
| 42 |
+
:alt: PyPI version
|
| 43 |
+
:target: https://pypi.org/project/jsonschema-specifications/
|
| 44 |
+
|
| 45 |
+
.. |Pythons| image:: https://img.shields.io/pypi/pyversions/jsonschema-specifications.svg
|
| 46 |
+
:alt: Supported Python versions
|
| 47 |
+
:target: https://pypi.org/project/jsonschema-specifications/
|
| 48 |
+
|
| 49 |
+
.. |CI| image:: https://github.com/python-jsonschema/jsonschema-specifications/workflows/CI/badge.svg
|
| 50 |
+
:alt: Build status
|
| 51 |
+
:target: https://github.com/python-jsonschema/jsonschema-specifications/actions?query=workflow%3ACI
|
| 52 |
+
|
| 53 |
+
.. |ReadTheDocs| image:: https://readthedocs.org/projects/jsonschema-specifications/badge/?version=stable&style=flat
|
| 54 |
+
:alt: ReadTheDocs status
|
| 55 |
+
:target: https://jsonschema-specifications.readthedocs.io/en/stable/
|
lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/RECORD
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
jsonschema_specifications-2024.10.1.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2
|
| 2 |
+
jsonschema_specifications-2024.10.1.dist-info/METADATA,sha256=-jCfClPka5D4aDTtJ683zNiEcSHXhPBLuk9r9XWwyHI,2985
|
| 3 |
+
jsonschema_specifications-2024.10.1.dist-info/RECORD,,
|
| 4 |
+
jsonschema_specifications-2024.10.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 5 |
+
jsonschema_specifications-2024.10.1.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
|
| 6 |
+
jsonschema_specifications-2024.10.1.dist-info/licenses/COPYING,sha256=QtzWNJX4e063x3V6-jebtVpT-Ur9el9lfZrfVyNuUVw,1057
|
| 7 |
+
jsonschema_specifications/__init__.py,sha256=qoTB2DKY7qvNrGhMPH6gtmAJRLilmVQ-fFZwT6ryqw0,386
|
| 8 |
+
jsonschema_specifications/_core.py,sha256=tFhc1CMleJ3AJOK_bjxOpFQTdrsUClFGfFxPBU_CebM,1140
|
| 9 |
+
jsonschema_specifications/schemas/draft201909/metaschema.json,sha256=e3YbPhIfCgyh6ioLjizIVrz4AWBLgmjXG6yqICvAwTs,1785
|
| 10 |
+
jsonschema_specifications/schemas/draft201909/vocabularies/applicator,sha256=aJUQDplyb7sQcFhRK77D7P1LJOj9L6zuPlBe5ysNTDE,1860
|
| 11 |
+
jsonschema_specifications/schemas/draft201909/vocabularies/content,sha256=m31PVaTi_bAsQwBo_f-rxzKt3OI42j8d8mkCScM1MnQ,517
|
| 12 |
+
jsonschema_specifications/schemas/draft201909/vocabularies/core,sha256=taLElX9kldClCB8ECevooU5BOayyA_x0hHH47eKvWyw,1531
|
| 13 |
+
jsonschema_specifications/schemas/draft201909/vocabularies/meta-data,sha256=1H4kRd1qgicaKY2DzGxsuNSuHhXg3Fa-zTehY-zwEoY,892
|
| 14 |
+
jsonschema_specifications/schemas/draft201909/vocabularies/validation,sha256=HlJsHTNac0gF_ILPV5jBK5YK19olF8Zs2lobCTWcPBw,2834
|
| 15 |
+
jsonschema_specifications/schemas/draft202012/metaschema.json,sha256=Qdp29a-3zgYtJI92JGOpL3ykfk4PkFsiS6av7vkd7Q8,2452
|
| 16 |
+
jsonschema_specifications/schemas/draft202012/vocabularies/applicator,sha256=xKbkFHuR_vf-ptwFjLG_k0AvdBS3ZXiosWqvHa1qrO8,1659
|
| 17 |
+
jsonschema_specifications/schemas/draft202012/vocabularies/content,sha256=CDQ3R3ZOSlgUJieTz01lIFenkThjxZUNQyl-jh_axbY,519
|
| 18 |
+
jsonschema_specifications/schemas/draft202012/vocabularies/core,sha256=wtEqjk3RHTNt_IOj9mOqTGnwtJs76wlP_rJbUxb0gD0,1564
|
| 19 |
+
jsonschema_specifications/schemas/draft202012/vocabularies/format,sha256=UOu_55BhGoSbjMQAoJwdDg-2q1wNQ6DyIgH9NiUFa_Q,403
|
| 20 |
+
jsonschema_specifications/schemas/draft202012/vocabularies/format-annotation,sha256=q8d1rf79idIjWBcNm_k_Tr0jSVY7u-3WDwK-98gSvMA,448
|
| 21 |
+
jsonschema_specifications/schemas/draft202012/vocabularies/format-assertion,sha256=xSJCuaG7eGsmw-gset1CjDH5yW5XXc6Z5W6l_qptogw,445
|
| 22 |
+
jsonschema_specifications/schemas/draft202012/vocabularies/meta-data,sha256=j3bW4U9Bubku-TO3CM3FFEyLUmhlGtEZGEhfsXVPHHY,892
|
| 23 |
+
jsonschema_specifications/schemas/draft202012/vocabularies/unevaluated,sha256=Lb-8tzmUtnCwl2SSre4f_7RsIWgnhNL1pMpWH54tDLQ,506
|
| 24 |
+
jsonschema_specifications/schemas/draft202012/vocabularies/validation,sha256=cBCjHlQfMtK-ch4t40jfdcmzaHaj7TBId_wKvaHTelg,2834
|
| 25 |
+
jsonschema_specifications/schemas/draft3/metaschema.json,sha256=LPdfZENvtb43Si6qJ6uLfh_WUcm0ba6mxnsC_WTiRYs,2600
|
| 26 |
+
jsonschema_specifications/schemas/draft4/metaschema.json,sha256=4UidC0dV8CeTMCWR0_y48Htok6gqlPJIlfjk7fEbguI,4357
|
| 27 |
+
jsonschema_specifications/schemas/draft6/metaschema.json,sha256=wp386fVINcOgbAOzxdXsDtp3cGVo-cTffPvHVmpRAG0,4437
|
| 28 |
+
jsonschema_specifications/schemas/draft7/metaschema.json,sha256=PVOSCIJhYGxVm2A_OFMpyfGrRbXWZ-uZBodFOwVdQF4,4819
|
| 29 |
+
jsonschema_specifications/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 30 |
+
jsonschema_specifications/tests/test_jsonschema_specifications.py,sha256=WkbYRW6A6FoZ0rivShfqVLSCsAiHJ2x8TxqECJTXPTY,1106
|
lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/REQUESTED
ADDED
|
File without changes
|
lib/python3.10/site-packages/jsonschema_specifications-2024.10.1.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: hatchling 1.25.0
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-any
|
lib/python3.10/site-packages/numba/cuda/tests/cudapy/recursion_usecases.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Usecases of recursive functions in the CUDA target, many derived from
|
| 3 |
+
numba/tests/recursion_usecases.py.
|
| 4 |
+
|
| 5 |
+
Some functions are compiled at import time, hence a separate module.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from numba import cuda
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@cuda.jit("i8(i8)", device=True)
|
| 12 |
+
def fib1(n):
|
| 13 |
+
if n < 2:
|
| 14 |
+
return n
|
| 15 |
+
# Note the second call does not use a named argument, unlike the CPU target
|
| 16 |
+
# usecase
|
| 17 |
+
return fib1(n - 1) + fib1(n - 2)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def make_fib2():
|
| 21 |
+
@cuda.jit("i8(i8)", device=True)
|
| 22 |
+
def fib2(n):
|
| 23 |
+
if n < 2:
|
| 24 |
+
return n
|
| 25 |
+
return fib2(n - 1) + fib2(n - 2)
|
| 26 |
+
|
| 27 |
+
return fib2
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
fib2 = make_fib2()
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
@cuda.jit
|
| 34 |
+
def type_change_self(x, y):
|
| 35 |
+
if x > 1 and y > 0:
|
| 36 |
+
return x + type_change_self(x - y, y)
|
| 37 |
+
else:
|
| 38 |
+
return y
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
# Implicit signature
|
| 42 |
+
@cuda.jit(device=True)
|
| 43 |
+
def fib3(n):
|
| 44 |
+
if n < 2:
|
| 45 |
+
return n
|
| 46 |
+
|
| 47 |
+
return fib3(n - 1) + fib3(n - 2)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
# Run-away self recursion
|
| 51 |
+
@cuda.jit(device=True)
|
| 52 |
+
def runaway_self(x):
|
| 53 |
+
return runaway_self(x)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
@cuda.jit(device=True)
|
| 57 |
+
def raise_self(x):
|
| 58 |
+
if x == 1:
|
| 59 |
+
raise ValueError("raise_self")
|
| 60 |
+
elif x > 0:
|
| 61 |
+
return raise_self(x - 1)
|
| 62 |
+
else:
|
| 63 |
+
return 1
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
@cuda.jit(debug=True, opt=False)
|
| 67 |
+
def raise_self_kernel(x):
|
| 68 |
+
raise_self(x)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def make_optional_return_case(jit=lambda x: x):
|
| 72 |
+
@jit
|
| 73 |
+
def foo(x):
|
| 74 |
+
if x > 5:
|
| 75 |
+
return x - 1
|
| 76 |
+
else:
|
| 77 |
+
return
|
| 78 |
+
|
| 79 |
+
@jit
|
| 80 |
+
def bar(x):
|
| 81 |
+
out = foo(x)
|
| 82 |
+
if out is None:
|
| 83 |
+
return out
|
| 84 |
+
elif out < 8:
|
| 85 |
+
return out
|
| 86 |
+
else:
|
| 87 |
+
return x * bar(out)
|
| 88 |
+
|
| 89 |
+
return bar
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def make_growing_tuple_case(jit=lambda x: x):
|
| 93 |
+
# From issue #4387
|
| 94 |
+
@jit
|
| 95 |
+
def make_list(n):
|
| 96 |
+
if n <= 0:
|
| 97 |
+
return None
|
| 98 |
+
|
| 99 |
+
return (n, make_list(n - 1))
|
| 100 |
+
return make_list
|
lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_boolean.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from numba.cuda.testing import unittest, CUDATestCase
|
| 3 |
+
from numba import cuda
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def boolean_func(A, vertial):
|
| 7 |
+
if vertial:
|
| 8 |
+
A[0] = 123
|
| 9 |
+
else:
|
| 10 |
+
A[0] = 321
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class TestCudaBoolean(CUDATestCase):
|
| 14 |
+
def test_boolean(self):
|
| 15 |
+
func = cuda.jit('void(float64[:], bool_)')(boolean_func)
|
| 16 |
+
A = np.array([0], dtype='float64')
|
| 17 |
+
func[1, 1](A, True)
|
| 18 |
+
self.assertTrue(A[0] == 123)
|
| 19 |
+
func[1, 1](A, False)
|
| 20 |
+
self.assertTrue(A[0] == 321)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
if __name__ == '__main__':
|
| 24 |
+
unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_cffi.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from numba import cuda, types
|
| 4 |
+
from numba.cuda.testing import (skip_on_cudasim, test_data_dir, unittest,
|
| 5 |
+
CUDATestCase)
|
| 6 |
+
from numba.tests.support import skip_unless_cffi
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@skip_unless_cffi
|
| 10 |
+
@skip_on_cudasim('Simulator does not support linking')
|
| 11 |
+
class TestCFFI(CUDATestCase):
|
| 12 |
+
def test_from_buffer(self):
|
| 13 |
+
import cffi
|
| 14 |
+
ffi = cffi.FFI()
|
| 15 |
+
|
| 16 |
+
link = str(test_data_dir / 'jitlink.ptx')
|
| 17 |
+
sig = types.void(types.CPointer(types.int32))
|
| 18 |
+
array_mutator = cuda.declare_device('array_mutator', sig)
|
| 19 |
+
|
| 20 |
+
@cuda.jit(link=[link])
|
| 21 |
+
def mutate_array(x):
|
| 22 |
+
x_ptr = ffi.from_buffer(x)
|
| 23 |
+
array_mutator(x_ptr)
|
| 24 |
+
|
| 25 |
+
x = np.arange(2).astype(np.int32)
|
| 26 |
+
mutate_array[1, 1](x)
|
| 27 |
+
|
| 28 |
+
# The foreign function should have copied element 1 to element 0
|
| 29 |
+
self.assertEqual(x[0], x[1])
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
if __name__ == '__main__':
|
| 33 |
+
unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_debuginfo.py
ADDED
|
@@ -0,0 +1,221 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numba.tests.support import override_config
|
| 2 |
+
from numba.cuda.testing import skip_on_cudasim
|
| 3 |
+
from numba import cuda
|
| 4 |
+
from numba.core import types
|
| 5 |
+
from numba.cuda.testing import CUDATestCase
|
| 6 |
+
import itertools
|
| 7 |
+
import re
|
| 8 |
+
import unittest
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@skip_on_cudasim('Simulator does not produce debug dumps')
|
| 12 |
+
class TestCudaDebugInfo(CUDATestCase):
|
| 13 |
+
"""
|
| 14 |
+
These tests only checks the compiled PTX for debuginfo section
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
def setUp(self):
|
| 18 |
+
super().setUp()
|
| 19 |
+
# If we're using LTO then we can't check the PTX in these tests,
|
| 20 |
+
# because we produce LTO-IR, which is opaque to the user.
|
| 21 |
+
# Additionally, LTO optimizes away the exception status due to an
|
| 22 |
+
# oversight in the way we generate it (it is not added to the used
|
| 23 |
+
# list).
|
| 24 |
+
self.skip_if_lto("Exceptions not supported with LTO")
|
| 25 |
+
|
| 26 |
+
def _getasm(self, fn, sig):
|
| 27 |
+
fn.compile(sig)
|
| 28 |
+
return fn.inspect_asm(sig)
|
| 29 |
+
|
| 30 |
+
def _check(self, fn, sig, expect):
|
| 31 |
+
asm = self._getasm(fn, sig=sig)
|
| 32 |
+
re_section_dbginfo = re.compile(r"\.section\s+\.debug_info\s+{")
|
| 33 |
+
match = re_section_dbginfo.search(asm)
|
| 34 |
+
assertfn = self.assertIsNotNone if expect else self.assertIsNone
|
| 35 |
+
assertfn(match, msg=asm)
|
| 36 |
+
|
| 37 |
+
def test_no_debuginfo_in_asm(self):
|
| 38 |
+
@cuda.jit(debug=False)
|
| 39 |
+
def foo(x):
|
| 40 |
+
x[0] = 1
|
| 41 |
+
|
| 42 |
+
self._check(foo, sig=(types.int32[:],), expect=False)
|
| 43 |
+
|
| 44 |
+
def test_debuginfo_in_asm(self):
|
| 45 |
+
@cuda.jit(debug=True, opt=False)
|
| 46 |
+
def foo(x):
|
| 47 |
+
x[0] = 1
|
| 48 |
+
|
| 49 |
+
self._check(foo, sig=(types.int32[:],), expect=True)
|
| 50 |
+
|
| 51 |
+
def test_environment_override(self):
|
| 52 |
+
with override_config('CUDA_DEBUGINFO_DEFAULT', 1):
|
| 53 |
+
# Using default value
|
| 54 |
+
@cuda.jit(opt=False)
|
| 55 |
+
def foo(x):
|
| 56 |
+
x[0] = 1
|
| 57 |
+
|
| 58 |
+
self._check(foo, sig=(types.int32[:],), expect=True)
|
| 59 |
+
|
| 60 |
+
# User override default value
|
| 61 |
+
@cuda.jit(debug=False)
|
| 62 |
+
def bar(x):
|
| 63 |
+
x[0] = 1
|
| 64 |
+
|
| 65 |
+
self._check(bar, sig=(types.int32[:],), expect=False)
|
| 66 |
+
|
| 67 |
+
def test_issue_5835(self):
|
| 68 |
+
# Invalid debug metadata would segfault NVVM when any function was
|
| 69 |
+
# compiled with debug turned on and optimization off. This eager
|
| 70 |
+
# compilation should not crash anything.
|
| 71 |
+
@cuda.jit((types.int32[::1],), debug=True, opt=False)
|
| 72 |
+
def f(x):
|
| 73 |
+
x[0] = 0
|
| 74 |
+
|
| 75 |
+
def test_wrapper_has_debuginfo(self):
|
| 76 |
+
sig = (types.int32[::1],)
|
| 77 |
+
|
| 78 |
+
@cuda.jit(sig, debug=True, opt=0)
|
| 79 |
+
def f(x):
|
| 80 |
+
x[0] = 1
|
| 81 |
+
|
| 82 |
+
llvm_ir = f.inspect_llvm(sig)
|
| 83 |
+
|
| 84 |
+
defines = [line for line in llvm_ir.splitlines()
|
| 85 |
+
if 'define void @"_ZN6cudapy' in line]
|
| 86 |
+
|
| 87 |
+
# Make sure we only found one definition
|
| 88 |
+
self.assertEqual(len(defines), 1)
|
| 89 |
+
|
| 90 |
+
wrapper_define = defines[0]
|
| 91 |
+
self.assertIn('!dbg', wrapper_define)
|
| 92 |
+
|
| 93 |
+
def test_debug_function_calls_internal_impl(self):
|
| 94 |
+
# Calling a function in a module generated from an implementation
|
| 95 |
+
# internal to Numba requires multiple modules to be compiled with NVVM -
|
| 96 |
+
# the internal implementation, and the caller. This example uses two
|
| 97 |
+
# modules because the `in (2, 3)` is implemented with:
|
| 98 |
+
#
|
| 99 |
+
# numba::cpython::listobj::in_seq::$3clocals$3e::seq_contains_impl$242(
|
| 100 |
+
# UniTuple<long long, 2>,
|
| 101 |
+
# int
|
| 102 |
+
# )
|
| 103 |
+
#
|
| 104 |
+
# This is condensed from this reproducer in Issue 5311:
|
| 105 |
+
# https://github.com/numba/numba/issues/5311#issuecomment-674206587
|
| 106 |
+
|
| 107 |
+
@cuda.jit((types.int32[:], types.int32[:]), debug=True, opt=False)
|
| 108 |
+
def f(inp, outp):
|
| 109 |
+
outp[0] = 1 if inp[0] in (2, 3) else 3
|
| 110 |
+
|
| 111 |
+
def test_debug_function_calls_device_function(self):
|
| 112 |
+
# Calling a device function requires compilation of multiple modules
|
| 113 |
+
# with NVVM - one for the caller and one for the callee. This checks
|
| 114 |
+
# that we don't cause an NVVM error in this case.
|
| 115 |
+
|
| 116 |
+
@cuda.jit(device=True, debug=True, opt=0)
|
| 117 |
+
def threadid():
|
| 118 |
+
return cuda.blockDim.x * cuda.blockIdx.x + cuda.threadIdx.x
|
| 119 |
+
|
| 120 |
+
@cuda.jit((types.int32[:],), debug=True, opt=0)
|
| 121 |
+
def kernel(arr):
|
| 122 |
+
i = cuda.grid(1)
|
| 123 |
+
if i < len(arr):
|
| 124 |
+
arr[i] = threadid()
|
| 125 |
+
|
| 126 |
+
def _test_chained_device_function(self, kernel_debug, f1_debug, f2_debug):
|
| 127 |
+
@cuda.jit(device=True, debug=f2_debug, opt=False)
|
| 128 |
+
def f2(x):
|
| 129 |
+
return x + 1
|
| 130 |
+
|
| 131 |
+
@cuda.jit(device=True, debug=f1_debug, opt=False)
|
| 132 |
+
def f1(x, y):
|
| 133 |
+
return x - f2(y)
|
| 134 |
+
|
| 135 |
+
@cuda.jit((types.int32, types.int32), debug=kernel_debug, opt=False)
|
| 136 |
+
def kernel(x, y):
|
| 137 |
+
f1(x, y)
|
| 138 |
+
|
| 139 |
+
kernel[1, 1](1, 2)
|
| 140 |
+
|
| 141 |
+
def test_chained_device_function(self):
|
| 142 |
+
# Calling a device function that calls another device function from a
|
| 143 |
+
# kernel with should succeed regardless of which jit decorators have
|
| 144 |
+
# debug=True. See Issue #7159.
|
| 145 |
+
|
| 146 |
+
debug_opts = itertools.product(*[(True, False)] * 3)
|
| 147 |
+
|
| 148 |
+
for kernel_debug, f1_debug, f2_debug in debug_opts:
|
| 149 |
+
with self.subTest(kernel_debug=kernel_debug,
|
| 150 |
+
f1_debug=f1_debug,
|
| 151 |
+
f2_debug=f2_debug):
|
| 152 |
+
self._test_chained_device_function(kernel_debug,
|
| 153 |
+
f1_debug,
|
| 154 |
+
f2_debug)
|
| 155 |
+
|
| 156 |
+
def _test_chained_device_function_two_calls(self, kernel_debug, f1_debug,
|
| 157 |
+
f2_debug):
|
| 158 |
+
|
| 159 |
+
@cuda.jit(device=True, debug=f2_debug, opt=False)
|
| 160 |
+
def f2(x):
|
| 161 |
+
return x + 1
|
| 162 |
+
|
| 163 |
+
@cuda.jit(device=True, debug=f1_debug, opt=False)
|
| 164 |
+
def f1(x, y):
|
| 165 |
+
return x - f2(y)
|
| 166 |
+
|
| 167 |
+
@cuda.jit(debug=kernel_debug, opt=False)
|
| 168 |
+
def kernel(x, y):
|
| 169 |
+
f1(x, y)
|
| 170 |
+
f2(x)
|
| 171 |
+
|
| 172 |
+
kernel[1, 1](1, 2)
|
| 173 |
+
|
| 174 |
+
def test_chained_device_function_two_calls(self):
|
| 175 |
+
# Calling a device function that calls a leaf device function from a
|
| 176 |
+
# kernel, and calling the leaf device function from the kernel should
|
| 177 |
+
# succeed, regardless of which jit decorators have debug=True. See
|
| 178 |
+
# Issue #7159.
|
| 179 |
+
|
| 180 |
+
debug_opts = itertools.product(*[(True, False)] * 3)
|
| 181 |
+
|
| 182 |
+
for kernel_debug, f1_debug, f2_debug in debug_opts:
|
| 183 |
+
with self.subTest(kernel_debug=kernel_debug,
|
| 184 |
+
f1_debug=f1_debug,
|
| 185 |
+
f2_debug=f2_debug):
|
| 186 |
+
self._test_chained_device_function_two_calls(kernel_debug,
|
| 187 |
+
f1_debug,
|
| 188 |
+
f2_debug)
|
| 189 |
+
|
| 190 |
+
def test_chained_device_three_functions(self):
|
| 191 |
+
# Like test_chained_device_function, but with enough functions (three)
|
| 192 |
+
# to ensure that the recursion visits all the way down the call tree
|
| 193 |
+
# when fixing linkage of functions for debug.
|
| 194 |
+
def three_device_fns(kernel_debug, leaf_debug):
|
| 195 |
+
@cuda.jit(device=True, debug=leaf_debug, opt=False)
|
| 196 |
+
def f3(x):
|
| 197 |
+
return x * x
|
| 198 |
+
|
| 199 |
+
@cuda.jit(device=True)
|
| 200 |
+
def f2(x):
|
| 201 |
+
return f3(x) + 1
|
| 202 |
+
|
| 203 |
+
@cuda.jit(device=True)
|
| 204 |
+
def f1(x, y):
|
| 205 |
+
return x - f2(y)
|
| 206 |
+
|
| 207 |
+
@cuda.jit(debug=kernel_debug, opt=False)
|
| 208 |
+
def kernel(x, y):
|
| 209 |
+
f1(x, y)
|
| 210 |
+
|
| 211 |
+
kernel[1, 1](1, 2)
|
| 212 |
+
|
| 213 |
+
# Check when debug on the kernel, on the leaf, and not on any function.
|
| 214 |
+
three_device_fns(kernel_debug=True, leaf_debug=True)
|
| 215 |
+
three_device_fns(kernel_debug=True, leaf_debug=False)
|
| 216 |
+
three_device_fns(kernel_debug=False, leaf_debug=True)
|
| 217 |
+
three_device_fns(kernel_debug=False, leaf_debug=False)
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
if __name__ == '__main__':
|
| 221 |
+
unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_exception.py
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from numba import cuda
|
| 4 |
+
from numba.cuda.testing import unittest, xfail_unless_cudasim, CUDATestCase
|
| 5 |
+
from numba.core import config
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class TestException(CUDATestCase):
    """Tests of exception detection and reporting for CUDA kernels.

    Exceptions are only checked/reported when kernels are compiled with
    ``debug=True`` (the simulator raises regardless — see individual tests).
    """

    def setUp(self):
        super().setUp()
        # LTO optimizes away the exception status due to an oversight
        # in the way we generate it (it is not added to the used list).
        self.skip_if_lto("Exceptions not supported with LTO")

    def test_exception(self):
        # Out-of-bounds tuple indexing raises IndexError only in the
        # debug-compiled variant; the unchecked variant launches cleanly.
        def foo(ary):
            x = cuda.threadIdx.x
            if x == 2:
                # NOTE: indexing with a out-of-bounds constant can fail at
                # compile-time instead (because the getitem is rewritten as a
                # static_getitem)
                ary.shape[-x]

        # Compile both an unchecked and a debug (checked) version of the
        # same Python function.
        unsafe_foo = cuda.jit(foo)
        safe_foo = cuda.jit(debug=True, opt=False)(foo)

        if not config.ENABLE_CUDASIM:
            # Simulator throws exceptions regardless of debug
            # setting
            unsafe_foo[1, 3](np.array([0, 1]))

        with self.assertRaises(IndexError) as cm:
            safe_foo[1, 3](np.array([0, 1]))
        self.assertIn("tuple index out of range", str(cm.exception))

    def test_user_raise(self):
        # A user-level `raise` in a debug kernel propagates to the host.
        @cuda.jit(debug=True, opt=False)
        def foo(do_raise):
            if do_raise:
                raise ValueError

        foo[1, 1](False)
        with self.assertRaises(ValueError):
            foo[1, 1](True)

    def case_raise_causing_warp_diverge(self, with_debug_mode):
        """Testing issue #2655.

        Exception raising code can cause the compiler to miss location
        of unifying branch target and resulting in unexpected warp
        divergence.
        """
        with_opt_mode = not with_debug_mode

        @cuda.jit(debug=with_debug_mode, opt=with_opt_mode)
        def problematic(x, y):
            tid = cuda.threadIdx.x
            ntid = cuda.blockDim.x

            if tid > 12:
                for i in range(ntid):
                    y[i] += x[i] // y[i]

            cuda.syncthreads()
            if tid < 17:
                for i in range(ntid):
                    x[i] += x[i] // y[i]

        # Reference kernel: same computation with explicit zero guards
        # instead of relying on the exception machinery.
        @cuda.jit
        def oracle(x, y):
            tid = cuda.threadIdx.x
            ntid = cuda.blockDim.x

            if tid > 12:
                for i in range(ntid):
                    if y[i] != 0:
                        y[i] += x[i] // y[i]

            cuda.syncthreads()
            if tid < 17:
                for i in range(ntid):
                    if y[i] != 0:
                        x[i] += x[i] // y[i]

        n = 32
        got_x = 1. / (np.arange(n) + 0.01)
        got_y = 1. / (np.arange(n) + 0.01)
        problematic[1, n](got_x, got_y)

        expect_x = 1. / (np.arange(n) + 0.01)
        expect_y = 1. / (np.arange(n) + 0.01)
        oracle[1, n](expect_x, expect_y)

        np.testing.assert_almost_equal(expect_x, got_x)
        np.testing.assert_almost_equal(expect_y, got_y)

    def test_raise_causing_warp_diverge(self):
        """Test case for issue #2655.
        """
        self.case_raise_causing_warp_diverge(with_debug_mode=False)

    # The following two cases relate to Issue #7806: Division by zero stops the
    # kernel. https://github.com/numba/numba/issues/7806.

    def test_no_zero_division_error(self):
        # When debug is False:
        # - Division by zero raises no exception
        # - Execution proceeds after a divide by zero
        @cuda.jit
        def f(r, x, y):
            r[0] = y[0] / x[0]
            r[1] = y[0]

        r = np.zeros(2)
        x = np.zeros(1)
        y = np.ones(1)

        f[1, 1](r, x, y)

        self.assertTrue(np.isinf(r[0]), 'Expected inf from div by zero')
        self.assertEqual(r[1], y[0], 'Expected execution to continue')

    def test_zero_division_error_in_debug(self):
        # When debug is True:
        # - Zero by division raises an exception
        # - Execution halts at the point of division by zero
        @cuda.jit(debug=True, opt=False)
        def f(r, x, y):
            r[0] = y[0] / x[0]
            r[1] = y[0]

        r = np.zeros(2)
        x = np.zeros(1)
        y = np.ones(1)

        # Simulator and device behaviour differs slightly in the exception
        # raised - in debug mode, the CUDA target uses the Python error model,
        # which gives a ZeroDivision error. The simulator uses NumPy with the
        # error mode for division by zero set to raise, which results in a
        # FloatingPointError instead.
        if config.ENABLE_CUDASIM:
            exc = FloatingPointError
        else:
            exc = ZeroDivisionError

        with self.assertRaises(exc):
            f[1, 1](r, x, y)

        self.assertEqual(r[0], 0, 'Expected result to be left unset')
        self.assertEqual(r[1], 0, 'Expected execution to stop')

    @xfail_unless_cudasim
    def test_raise_in_device_function(self):
        # This is an expected failure because reporting of exceptions raised in
        # device functions does not work correctly - see Issue #8036:
        # https://github.com/numba/numba/issues/8036
        msg = 'Device Function Error'

        @cuda.jit(device=True)
        def f():
            raise ValueError(msg)

        @cuda.jit(debug=True)
        def kernel():
            f()

        with self.assertRaises(ValueError) as raises:
            kernel[1, 1]()

        self.assertIn(msg, str(raises.exception))
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
# Allow this test module to be run directly as a script.
if __name__ == '__main__':
    unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_freevar.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
|
| 3 |
+
from numba import cuda
|
| 4 |
+
from numba.cuda.testing import unittest, CUDATestCase
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class TestFreeVar(CUDATestCase):
    """Checks that closure (free) variables can be used as arguments to
    ``cuda.shared.array`` inside a jitted kernel."""

    def test_freevar(self):
        """Make sure we can compile the following kernel with freevar reference
        in arguments to shared.array
        """
        from numba import float32

        # Both the shape and the dtype below are captured from the
        # enclosing scope rather than passed in or hard-coded.
        size = 1024
        nbtype = float32

        @cuda.jit("(float32[::1], intp)")
        def foo(A, i):
            "Dummy function"
            sdata = cuda.shared.array(size, # size is freevar
                                      dtype=nbtype) # nbtype is freevar
            A[i] = sdata[i]

        A = np.arange(2, dtype="float32")
        foo[1, 1](A, 0)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# Allow this test module to be run directly as a script.
if __name__ == '__main__':
    unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_gufunc_scheduling.py
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numba.cuda.deviceufunc import GUFuncEngine
|
| 2 |
+
import unittest
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def template(signature, shapes, expects):
    """Schedule *shapes* for a gufunc with the given *signature* and verify
    the resulting schedule.

    ``expects`` maps schedule attribute names to their expected values; an
    ``AssertionError`` is raised for the first attribute that does not match.
    """
    engine = GUFuncEngine.from_signature(signature)
    schedule = engine.schedule(shapes)
    fmt = 'error for %s: got=%s but expect=%s'
    for attr, expected in expects.items():
        actual = getattr(schedule, attr)
        if actual != expected:
            raise AssertionError(fmt % (attr, actual, expected))
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class TestGUFuncScheduling(unittest.TestCase):
    """Checks GUFuncEngine scheduling for various gufunc signatures and
    broadcast shapes: inner (core) shapes, output shapes, loop dimensions
    and which inputs are pinned (broadcast)."""

    def test_signature_1(self):
        # One input carries a loop dimension of 100; the other is
        # broadcast (pinned).
        signature = '(m, n), (n, p) -> (m, p)'
        shapes = (100, 4, 5), (1, 5, 7)
        expects = dict(
            ishapes=[(4, 5), (5, 7)],
            oshapes=[(4, 7)],
            loopdims=(100,),
            pinned=[False, True]
        )
        template(signature, shapes, expects)

    def test_signature_2(self):
        # Both inputs carry matching loop dimensions; nothing is pinned.
        signature = '(m, n), (n, p) -> (m, p)'
        shapes = (100, 4, 5), (100, 5, 7)
        expects = dict(
            ishapes=[(4, 5), (5, 7)],
            oshapes=[(4, 7)],
            loopdims=(100,),
            pinned=[False, False]
        )
        template(signature, shapes, expects)

    def test_signature_3(self):
        # Multiple (two) loop dimensions.
        signature = '(m, n), (n, p) -> (m, p)'
        shapes = (12, 34, 4, 5), (12, 34, 5, 7)
        expects = dict(
            ishapes=[(4, 5), (5, 7)],
            oshapes=[(4, 7)],
            loopdims=(12, 34),
            pinned=[False, False]
        )
        template(signature, shapes, expects)

    def test_signature_4(self):
        # No loop dimensions at all: shapes match the core dims exactly.
        signature = '(m, n), (n, p) -> (m, p)'
        shapes = (4, 5), (5, 7)
        expects = dict(
            ishapes=[(4, 5), (5, 7)],
            oshapes=[(4, 7)],
            loopdims=(),
            pinned=[False, False]
        )
        template(signature, shapes, expects)

    def test_signature_5(self):
        # 1-D core dimensions with no loop dims.
        signature = '(a), (a) -> (a)'
        shapes = (5,), (5,)
        expects = dict(
            ishapes=[(5,), (5,)],
            oshapes=[(5,)],
            loopdims=(),
            pinned=[False, False]
        )
        template(signature, shapes, expects)

    def test_signature_6(self):
        # Scalar (zero-dimensional) core signature: the whole shape
        # becomes the loop dimension.
        signature = '(), () -> ()'
        shapes = (5,), (5,)
        expects = dict(
            ishapes=[(), ()],
            oshapes=[()],
            loopdims=(5,),
            pinned=[False, False]
        )
        template(signature, shapes, expects)

    def test_signature_7(self):
        # Scalar core signature with one truly scalar input, which is
        # pinned (broadcast against the looped input).
        signature = '(), () -> ()'
        shapes = (5,), ()
        expects = dict(
            ishapes=[(), ()],
            oshapes=[()],
            loopdims=(5,),
            pinned=[False, True]
        )
        template(signature, shapes, expects)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
# Allow this test module to be run directly as a script.
if __name__ == '__main__':
    unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_iterators.py
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numba import cuda
|
| 2 |
+
from numba.cuda.testing import unittest, CUDATestCase
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class TestIterators(CUDATestCase):
    """Checks that enumerate() and zip() (and their compositions) iterate
    correctly inside CUDA kernels.

    Each kernel records the first failure it detects into ``error[0]``
    (0 means success), since asserting on the host is the only practical
    way to report from device code here.
    """

    def test_enumerate(self):
        @cuda.jit
        def enumerator(x, error):
            count = 0

            for i, v in enumerate(x):
                # The yielded index must track a manual counter, and the
                # yielded value must match direct indexing.
                if count != i:
                    error[0] = 1
                if v != x[i]:
                    error[0] = 2

                count += 1

            # The loop must have visited every element exactly once.
            if count != len(x):
                error[0] = 3

        x = np.asarray((10, 9, 8, 7, 6))
        error = np.zeros(1, dtype=np.int32)

        enumerator[1, 1](x, error)
        self.assertEqual(error[0], 0)

    def _test_twoarg_function(self, f):
        # Shared driver: launch kernel ``f`` on two fixed arrays and
        # check it reported no error.
        x = np.asarray((10, 9, 8, 7, 6))
        y = np.asarray((1, 2, 3, 4, 5))
        error = np.zeros(1, dtype=np.int32)

        f[1, 1](x, y, error)
        self.assertEqual(error[0], 0)

    def test_zip(self):
        @cuda.jit
        def zipper(x, y, error):
            i = 0

            for xv, yv in zip(x, y):
                if xv != x[i]:
                    error[0] = 1
                if yv != y[i]:
                    error[0] = 2

                i += 1

            if i != len(x):
                error[0] = 3

        self._test_twoarg_function(zipper)

    def test_enumerate_zip(self):
        # enumerate() wrapping zip()
        @cuda.jit
        def enumerator_zipper(x, y, error):
            count = 0

            for i, (xv, yv) in enumerate(zip(x, y)):
                if i != count:
                    error[0] = 1
                if xv != x[i]:
                    error[0] = 2
                if yv != y[i]:
                    error[0] = 3

                count += 1

            if count != len(x):
                error[0] = 4

        self._test_twoarg_function(enumerator_zipper)

    def test_zip_enumerate(self):
        # zip() wrapping enumerate()
        @cuda.jit
        def zipper_enumerator(x, y, error):
            count = 0

            for (i, xv), yv in zip(enumerate(x), y):
                if i != count:
                    error[0] = 1
                if xv != x[i]:
                    error[0] = 2
                if yv != y[i]:
                    error[0] = 3

                count += 1

            if count != len(x):
                error[0] = 4

        self._test_twoarg_function(zipper_enumerator)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
# Allow this test module to be run directly as a script.
if __name__ == '__main__':
    unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_sm_creation.py
ADDED
|
@@ -0,0 +1,205 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from numba import cuda, float32, int32, void
|
| 3 |
+
from numba.core.errors import TypingError
|
| 4 |
+
from numba.cuda.testing import unittest, CUDATestCase
|
| 5 |
+
from numba.cuda.testing import skip_on_cudasim
|
| 6 |
+
from .extensions_usecases import test_struct_model_type
|
| 7 |
+
|
| 8 |
+
GLOBAL_CONSTANT = 5
|
| 9 |
+
GLOBAL_CONSTANT_2 = 6
|
| 10 |
+
GLOBAL_CONSTANT_TUPLE = 5, 6
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def udt_global_constants(A):
    # Shared array whose shape is a module-level global constant (valid).
    sa = cuda.shared.array(shape=GLOBAL_CONSTANT, dtype=float32)
    i = cuda.grid(1)
    A[i] = sa[i]
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def udt_global_build_tuple(A):
    # Shape built as a tuple of two global constants (valid).
    sa = cuda.shared.array(shape=(GLOBAL_CONSTANT, GLOBAL_CONSTANT_2),
                           dtype=float32)
    i, j = cuda.grid(2)
    A[i, j] = sa[i, j]
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def udt_global_build_list(A):
    # Shape given as a *list* of constants — lists are rejected by the
    # typing of shared.array (exercised as an error case below).
    sa = cuda.shared.array(shape=[GLOBAL_CONSTANT, GLOBAL_CONSTANT_2],
                           dtype=float32)
    i, j = cuda.grid(2)
    A[i, j] = sa[i, j]
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def udt_global_constant_tuple(A):
    # Shape referenced as a single global tuple constant (valid).
    sa = cuda.shared.array(shape=GLOBAL_CONSTANT_TUPLE, dtype=float32)
    i, j = cuda.grid(2)
    A[i, j] = sa[i, j]
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def udt_invalid_1(A):
    # Shape is a runtime array element (A[0]), not a compile-time
    # constant — used as an error case.
    sa = cuda.shared.array(shape=A[0], dtype=float32)
    i = cuda.grid(1)
    A[i] = sa[i]
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def udt_invalid_2(A):
    # Tuple shape containing a runtime value (A[0]) — used as an
    # error case.
    sa = cuda.shared.array(shape=(1, A[0]), dtype=float32)
    i, j = cuda.grid(2)
    A[i, j] = sa[i, j]
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def udt_invalid_3(A):
    # Same non-literal tuple shape as udt_invalid_2 but with a 1-D
    # input array — used as an error case.
    sa = cuda.shared.array(shape=(1, A[0]), dtype=float32)
    i = cuda.grid(1)
    A[i] = sa[i, 0]
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class TestSharedMemoryCreation(CUDATestCase):
    """Checks the typing of ``cuda.shared.array`` shape and dtype
    arguments: valid constant shapes compile and launch, invalid
    (non-literal / non-integer / list) shapes raise TypingError with
    specific messages, and several ways of spelling the dtype are
    accepted."""

    def getarg(self):
        # A 1-element float32 array used as a kernel argument.
        return np.array(100, dtype=np.float32, ndmin=1)

    def getarg2(self):
        # 2-D (1x1) variant of getarg() for kernels indexed with cuda.grid(2).
        return self.getarg().reshape(1,1)

    def test_global_constants(self):
        udt = cuda.jit((float32[:],))(udt_global_constants)
        udt[1, 1](self.getarg())

    def test_global_build_tuple(self):
        udt = cuda.jit((float32[:, :],))(udt_global_build_tuple)
        udt[1, 1](self.getarg2())

    @skip_on_cudasim('Simulator does not prohibit lists for shared array shape')
    def test_global_build_list(self):
        with self.assertRaises(TypingError) as raises:
            cuda.jit((float32[:, :],))(udt_global_build_list)

        self.assertIn("No implementation of function "
                      "Function(<function shared.array",
                      str(raises.exception))
        self.assertIn("found for signature:\n \n "
                      ">>> array(shape=list(int64)<iv=[5, 6]>, "
                      "dtype=class(float32)",
                      str(raises.exception))

    def test_global_constant_tuple(self):
        udt = cuda.jit((float32[:, :],))(udt_global_constant_tuple)
        udt[1, 1](self.getarg2())

    @skip_on_cudasim("Can't check for constants in simulator")
    def test_invalid_1(self):
        # Scalar shape cannot be a floating point value
        with self.assertRaises(TypingError) as raises:
            cuda.jit((float32[:],))(udt_invalid_1)

        self.assertIn("No implementation of function "
                      "Function(<function shared.array",
                      str(raises.exception))
        self.assertIn("found for signature:\n \n "
                      ">>> array(shape=float32, dtype=class(float32))",
                      str(raises.exception))

    @skip_on_cudasim("Can't check for constants in simulator")
    def test_invalid_2(self):
        # Tuple shape cannot contain a floating point value
        with self.assertRaises(TypingError) as raises:
            cuda.jit((float32[:, :],))(udt_invalid_2)

        self.assertIn("No implementation of function "
                      "Function(<function shared.array",
                      str(raises.exception))
        self.assertIn("found for signature:\n \n "
                      ">>> array(shape=Tuple(Literal[int](1), "
                      "array(float32, 1d, A)), dtype=class(float32))",
                      str(raises.exception))

    @skip_on_cudasim("Can't check for constants in simulator")
    def test_invalid_3(self):
        # Scalar shape must be literal
        with self.assertRaises(TypingError) as raises:
            cuda.jit((int32[:],))(udt_invalid_1)

        self.assertIn("No implementation of function "
                      "Function(<function shared.array",
                      str(raises.exception))
        self.assertIn("found for signature:\n \n "
                      ">>> array(shape=int32, dtype=class(float32))",
                      str(raises.exception))

    @skip_on_cudasim("Can't check for constants in simulator")
    def test_invalid_4(self):
        # Tuple shape must contain only literals
        with self.assertRaises(TypingError) as raises:
            cuda.jit((int32[:],))(udt_invalid_3)

        self.assertIn("No implementation of function "
                      "Function(<function shared.array",
                      str(raises.exception))
        self.assertIn("found for signature:\n \n "
                      ">>> array(shape=Tuple(Literal[int](1), int32), "
                      "dtype=class(float32))",
                      str(raises.exception))

    def check_dtype(self, f, dtype):
        # Find the typing of the dtype argument to cuda.shared.array
        annotation = next(iter(f.overloads.values()))._type_annotation
        l_dtype = annotation.typemap['s'].dtype
        # Ensure that the typing is correct
        self.assertEqual(l_dtype, dtype)

    @skip_on_cudasim("Can't check typing in simulator")
    def test_numba_dtype(self):
        # Check that Numba types can be used as the dtype of a shared array
        @cuda.jit(void(int32[::1]))
        def f(x):
            s = cuda.shared.array(10, dtype=int32)
            s[0] = x[0]
            x[0] = s[0]

        self.check_dtype(f, int32)

    @skip_on_cudasim("Can't check typing in simulator")
    def test_numpy_dtype(self):
        # Check that NumPy types can be used as the dtype of a shared array
        @cuda.jit(void(int32[::1]))
        def f(x):
            s = cuda.shared.array(10, dtype=np.int32)
            s[0] = x[0]
            x[0] = s[0]

        self.check_dtype(f, int32)

    @skip_on_cudasim("Can't check typing in simulator")
    def test_string_dtype(self):
        # Check that strings can be used to specify the dtype of a shared array
        @cuda.jit(void(int32[::1]))
        def f(x):
            s = cuda.shared.array(10, dtype='int32')
            s[0] = x[0]
            x[0] = s[0]

        self.check_dtype(f, int32)

    @skip_on_cudasim("Can't check typing in simulator")
    def test_invalid_string_dtype(self):
        # Check that strings of invalid dtypes cause a typing error
        re = ".*Invalid NumPy dtype specified: 'int33'.*"
        with self.assertRaisesRegex(TypingError, re):
            @cuda.jit(void(int32[::1]))
            def f(x):
                s = cuda.shared.array(10, dtype='int33')
                s[0] = x[0]
                x[0] = s[0]

    @skip_on_cudasim("Can't check typing in simulator")
    def test_type_with_struct_data_model(self):
        # Extension types with a struct data model are accepted as dtype.
        @cuda.jit(void(test_struct_model_type[::1]))
        def f(x):
            s = cuda.shared.array(10, dtype=test_struct_model_type)
            s[0] = x[0]
            x[0] = s[0]
        self.check_dtype(f, test_struct_model_type)
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
# Allow this test module to be run directly as a script.
if __name__ == '__main__':
    unittest.main()
|
lib/python3.10/site-packages/numba/cuda/tests/cudapy/test_userexc.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numba.cuda.testing import unittest, CUDATestCase
|
| 2 |
+
from numba import cuda
|
| 3 |
+
from numba.core import config
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class MyError(Exception):
    """User-defined exception type raised from within the kernels under
    test; carries no behaviour of its own."""
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
# Pattern for the source-location prefix expected in exception messages,
# of the form: In function "test_exc", file <path>, line <number>.
regex_pattern = (
    r'In function [\'"]test_exc[\'"], file [\:\.\/\\\-a-zA-Z_0-9]+, line \d+'
)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class TestUserExc(CUDATestCase):
    """Tests raising user-defined exception classes from debug-compiled
    CUDA kernels, including the source-location and thread-identity
    information added to the message on the hardware target."""

    def setUp(self):
        super().setUp()
        # LTO optimizes away the exception status due to an oversight
        # in the way we generate it (it is not added to the used list).
        # See https://github.com/numba/numba/issues/9526.
        self.skip_if_lto("Exceptions not supported with LTO")

    def test_user_exception(self):
        @cuda.jit("void(int32)", debug=True)
        def test_exc(x):
            if x == 1:
                raise MyError
            elif x == 2:
                raise MyError("foo")

        test_exc[1, 1](0)  # no raise

        # Bare raise: message carries location and thread identity only.
        with self.assertRaises(MyError) as cm:
            test_exc[1, 1](1)
        if not config.ENABLE_CUDASIM:
            # Location/thread info is only added on the hardware target.
            self.assertRegex(str(cm.exception), regex_pattern)
            self.assertIn("tid=[0, 0, 0] ctaid=[0, 0, 0]", str(cm.exception))

        # Raise with an argument: the argument appears in the message.
        with self.assertRaises(MyError) as cm:
            test_exc[1, 1](2)
        if not config.ENABLE_CUDASIM:
            # NOTE: the original had this assertRegex duplicated on two
            # consecutive lines; the redundant copy has been removed.
            self.assertRegex(str(cm.exception), regex_pattern)
            self.assertIn("tid=[0, 0, 0] ctaid=[0, 0, 0]: foo", str(cm.exception))
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
# Allow this test module to be run directly as a script.
if __name__ == '__main__':
    unittest.main()
|
lib/python3.10/site-packages/tf2onnx/custom_opsets/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
""" custom tf2onnx mapping functions. """
|
| 4 |
+
|
| 5 |
+
from . import ms
|
| 6 |
+
from . import onnx_ml
|
| 7 |
+
from . import string_ops
|
lib/python3.10/site-packages/tf2onnx/custom_opsets/ms.py
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
""" tf2onnx mapping functions for ms domain. """
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
from onnx import onnx_pb
|
| 8 |
+
from onnx.onnx_pb import TensorProto
|
| 9 |
+
from tf2onnx import constants, utils
|
| 10 |
+
from tf2onnx.handler import tf_op
|
| 11 |
+
from tf2onnx.onnx_opset import controlflow
|
| 12 |
+
from tf2onnx.onnx_opset.nn import conv_convert_inputs, conv_dims_attr
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# pylint: disable=unused-argument,missing-docstring
|
| 16 |
+
|
| 17 |
+
def make_range(ctx, start, limit, delta, output, scope_name, shape, dtype):
    """Emit a Range for (start, limit, delta) into the graph *ctx*.

    Dispatches to the constant-folded implementation when all three
    inputs are constant nodes, and to the dynamic (com.microsoft Range
    op) implementation otherwise.
    """
    # Fixed: the original compared the result of all() with `is True`,
    # an identity check that PEP 8 forbids for booleans and that adds
    # nothing since all() already returns a bool.
    if all(ctx.get_node_by_output(n).is_const() for n in [start, limit, delta]):
        controlflow.make_range_const(ctx, start, limit, delta, output, scope_name, shape, dtype)
    else:
        _make_range_non_const(ctx, start, limit, delta, output, scope_name, shape, dtype)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def _make_range_non_const(ctx, start, limit, delta, output, scope_name, shape, dtype):
    # Dynamic range: emit the com.microsoft contrib "Range" op after
    # checking the element type is one it supports.
    utils.make_sure(
        dtype in [TensorProto.FLOAT, TensorProto.DOUBLE, TensorProto.INT16,
                  TensorProto.INT32, TensorProto.INT64,
                  TensorProto.COMPLEX64, TensorProto.COMPLEX128],
        "dtype %s is not supported", dtype)
    ctx.make_node("Range", [start, limit, delta], outputs=[output], name=scope_name, shapes=[shape], dtypes=[dtype],
                  domain=constants.MICROSOFT_DOMAIN)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@tf_op("Range", domain=constants.MICROSOFT_DOMAIN)
class Range:
    """Maps TF Range to the com.microsoft Range contrib op (or a
    constant when foldable)."""

    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Range."""
        # T range = Range(T start, T limit, T delta)
        dtype = node.get_attr_int("Tidx")
        shape = node.output_shapes[0]
        utils.make_sure(dtype is not None, "Tidx of %s is None", node.name)
        # Replace the TF node entirely; make_range re-creates the output
        # under the same name.
        ctx.remove_node(node.name)
        make_range(ctx, node.input[0], node.input[1], node.input[2], node.output[0], node.name, shape, dtype)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@tf_op("Conv2DBackpropInput", domain=constants.MICROSOFT_DOMAIN, onnx_op="ConvTransposeWithDynamicPads")
class ConvTransposeWithDynamicPads:
    """Maps TF Conv2DBackpropInput to the com.microsoft
    ConvTransposeWithDynamicPads contrib op, computing the pads tensor
    in-graph from the input/output shapes."""

    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # T output = Conv2DBackpropInput(int32 input_sizes, T filter, T out_backprop,
        # @list(int) strides, @bool use_cudnn_on_gpu, @string padding, @string data_format, @list(int) dilations)
        # T Y = ConvTranspose(T X, T W, T B, T pads, @STRING auto_pad, @INTS dilations,
        # @INT group, @INTS kernel_shape, @INTS output_shape, @INTS strides)

        # tf uses "output_shape" while onnx uses "pads", the equation to calculate pads is:
        # total_padding[i] = stride[i] * (input_shape[i] - 1)+ kernel_shape[i] - output_shape[i]
        # pads[i_begin] = total_padding[i]/2
        # pads[i_end] = total_padding[i] - (total_padding[i]/2)
        # output dtype of onnx "shape" is int64 while in tf dtype could be specified
        utils.make_sure(node.is_nhwc(), "only support NHWC for now")
        node.domain = constants.MICROSOFT_DOMAIN
        # Gather the H and W dims (indices 1, 2 in NHWC) of the input
        # (out_backprop) and requested output shapes.
        input_shape = ctx.make_node("Shape", [node.input[2]])
        hw_indices = ctx.make_const(utils.make_name("hw_indices"), np.array([1, 2]).astype(np.int64))
        input_shape_hw = ctx.make_node("Gather", [input_shape.output[0], hw_indices.output[0]])
        output_shape = node.input[0]
        if ctx.get_dtype(output_shape) != onnx_pb.TensorProto.INT64:
            output_shape = ctx.make_node("Cast", [output_shape], attr={"to": onnx_pb.TensorProto.INT64}).output[0]
        output_shape_hw = ctx.make_node("Gather", [output_shape, hw_indices.output[0]])
        kernel_shape_hw = list(ctx.get_shape(node.input[1]))[0:2]
        kernel_shape = ctx.make_const(utils.make_name("const_convtrans"), np.array(kernel_shape_hw).astype(np.int64))
        strides = conv_dims_attr(node, "strides")
        utils.make_sure(len(strides) == 2, "only stride of H and W needed")

        stride_node = ctx.make_const(utils.make_name("const_convtrans"), np.array(strides).astype(np.int64))
        const_one = ctx.make_const(utils.make_name("cosnt_one"), np.array([1]).astype(np.int64))
        const_two = ctx.make_const(utils.make_name("cosnt_two"), np.array([2]).astype(np.int64))

        # Build the pads formula above as graph nodes:
        # total = stride * (in_hw - 1) + kernel_hw - out_hw
        tmp0 = ctx.make_node("Sub", [input_shape_hw.output[0], const_one.output[0]])
        tmp1 = ctx.make_node("Mul", [stride_node.output[0], tmp0.output[0]])
        tmp2 = ctx.make_node("Add", [tmp1.output[0], kernel_shape.output[0]])
        total_pads = ctx.make_node("Sub", [tmp2.output[0], output_shape_hw.output[0]],
                                   dtypes=[onnx_pb.TensorProto.INT64])
        pads_beg = ctx.make_node("Div", [total_pads.output[0], const_two.output[0]], dtypes=[onnx_pb.TensorProto.INT64])
        pads_end = ctx.make_node("Sub", [total_pads.output[0], pads_beg.output[0]])
        pads = ctx.make_node("Concat", [pads_beg.output[0], pads_end.output[0]], attr={"axis": 0})
        # set node's attrs, Note: output_padding, group are left default.
        conv_dims_attr(node, "dilations")
        # set node's inputs from (output_shape, filter, input_tensor) to (input_tensor, filter, pads, Bias)
        ctx.replace_input(node, node.input[0], node.input[2], 0)
        ctx.replace_input(node, node.input[2], pads.output[0], 2)
        conv_convert_inputs(ctx, node, with_kernel=True)
        # Drop TF-only attributes that have no ONNX counterpart.
        node.attr.pop("data_format")
        node.attr.pop("padding")
        if "explicit_paddings" in node.attr:
            node.attr.pop("explicit_paddings")
|
| 97 |
+
|
| 98 |
+
@tf_op("CropAndResize", domain=constants.MICROSOFT_DOMAIN)
class CropAndResize:
    """Maps TF CropAndResize to the com.microsoft contrib op, wrapping
    it in NHWC<->NCHW transposes."""

    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """ utilize contrib cropandresize """
        # TF calls the attribute "method"; the contrib op calls it "mode".
        node.attr['method'].name = 'mode'
        node.domain = constants.MICROSOFT_DOMAIN
        # The contrib op works on NCHW: transpose on the way in and
        # back to NHWC on the way out.
        ctx.insert_new_node_on_input(node, "Transpose", node.input[0], perm=constants.NHWC_TO_NCHW)
        ctx.insert_new_node_on_output("Transpose", node.output[0], node.name + '_transposed',
                                      None, perm=constants.NCHW_TO_NHWC)
|
| 108 |
+
|
| 109 |
+
@tf_op("MatrixInverse", domain=constants.MICROSOFT_DOMAIN, onnx_op="Inverse")
class Inverse:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Map TF MatrixInverse to the contrib Inverse op (no adjoint support)."""
        # The contrib op has no adjoint/conjugate-transpose mode; refuse to
        # convert when the TF node requested it.
        utils.make_sure(node.get_attr('adjoint').i == 0, "adjoint must be false")
        del node.attr["adjoint"]
        node.domain = constants.MICROSOFT_DOMAIN
|
lib/python3.10/site-packages/tf2onnx/custom_opsets/onnx_ml.py
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
""" tf2onnx mapping functions for onnx ml domain. """
|
| 4 |
+
import logging
|
| 5 |
+
import numpy as np
|
| 6 |
+
from onnx import TensorProto
|
| 7 |
+
from onnx import numpy_helper
|
| 8 |
+
from tf2onnx import constants
|
| 9 |
+
from tf2onnx.handler import tf_op
|
| 10 |
+
from tf2onnx import utils
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
logger = logging.getLogger(__name__)
|
| 14 |
+
|
| 15 |
+
# pylint: disable=unused-argument,missing-docstring,unnecessary-pass
|
| 16 |
+
|
| 17 |
+
@tf_op("HashTableV2")
class HashTable:
    @classmethod
    def version_8(cls, ctx, node, **kwargs):
        """ HashTable will be removed """
        # Intentionally a no-op: the table op is consumed (and removed) by the
        # LookupTable* handlers below once its consumers are converted.
        pass
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@tf_op("LookupTableFindV2")
class LookupTableFind:
    @classmethod
    def version_8(cls, ctx, node, initialized_tables, **kwargs):
        """ convert lookup to category mapper """
        # Walk through Identity nodes to reach the actual table op so its
        # 'shared_name' can be used to look up the table contents.
        table_node = node.inputs[0]
        while table_node.type == 'Identity':
            table_node = table_node.inputs[0]
        shared_name = table_node.get_attr_value("shared_name")
        utils.make_sure(shared_name is not None, "Could not determine table shared name for node %s", node.name)
        utils.make_sure(shared_name in initialized_tables, "Initialized table %s for node %s not found.",
                        shared_name, node.name)

        # Value returned for keys missing from the table; must be const so it
        # can be baked into the replacement node.
        default_node = node.inputs[2]
        utils.make_sure(default_node.is_const(), "Default value of table lookup must be const.")
        default_val_np = default_node.get_tensor_value(as_list=False)
        default_val = default_node.get_tensor_value()

        dtype = ctx.get_dtype(node.output[0])
        in_dtype = ctx.get_dtype(node.input[1])
        utils.make_sure(dtype == TensorProto.INT64 and in_dtype == TensorProto.STRING,
                        "Only lookup tables of type string->int64 are currently supported.")

        cats_strings, cats_int64s = initialized_tables[shared_name]
        shape = ctx.get_shape(node.output[0])

        # Capture name/inputs/outputs before removing the node so the
        # replacement node can reuse them.
        node_name = node.name
        node_inputs = node.input
        node_outputs = node.output

        if node.inputs[1].is_const():
            # Handle explicitly since const folding doesn't work for tables
            key_np = node.inputs[1].get_tensor_value(as_list=False)
            ctx.remove_node(node.name)
            key_to_val = dict(zip(cats_strings, cats_int64s))
            def lookup_value(key):
                return key_to_val.get(key.encode("UTF-8"), default_val_np)
            lookup_result = np.vectorize(lookup_value)(key_np)
            onnx_tensor = numpy_helper.from_array(lookup_result, node_name)
            ctx.make_node("Const", name=node_name, inputs=[], outputs=node_outputs,
                          attr={"value": onnx_tensor}, shapes=[lookup_result.shape], dtypes=[dtype])
        else:
            # Dynamic keys: map the lookup onto the ai.onnx.ml CategoryMapper op.
            ctx.remove_node(node.name)
            ctx.make_node("CategoryMapper", domain=constants.AI_ONNX_ML_DOMAIN,
                          name=node_name, inputs=[node_inputs[1]], outputs=node_outputs,
                          attr={'cats_int64s': cats_int64s, 'cats_strings': cats_strings, 'default_int64': default_val},
                          shapes=[shape], dtypes=[dtype])

        # Remove the table op itself once nothing else consumes it.
        customer_nodes = ctx.find_output_consumers(table_node.output[0])
        if len(customer_nodes) == 0:
            ctx.remove_node(table_node.name)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
@tf_op("LookupTableSizeV2")
class LookupTableSize:
    @classmethod
    def version_1(cls, ctx, node, initialized_tables, **kwargs):
        """Replace LookupTableSizeV2 with a const holding the table size."""
        # Walk through Identity nodes to reach the actual table op.
        table_node = node.inputs[0]
        while table_node.type == 'Identity':
            table_node = table_node.inputs[0]
        shared_name = table_node.get_attr_value("shared_name")
        utils.make_sure(shared_name is not None, "Could not determine table shared name for node %s", node.name)
        utils.make_sure(shared_name in initialized_tables, "Initialized table %s for node %s not found.",
                        shared_name, node.name)
        keys, _ = initialized_tables[shared_name]

        # The table contents are known at conversion time, so the size is a
        # plain int64 scalar constant.
        node_name = node.name
        node_outputs = node.output
        ctx.remove_node(node.name)
        size_const = ctx.make_const(node_name, np.array(len(keys), dtype=np.int64))
        ctx.replace_all_inputs(node_outputs[0], size_const.output[0])

        # Remove the table op itself once nothing else consumes it.
        customer_nodes = ctx.find_output_consumers(table_node.output[0])
        if len(customer_nodes) == 0:
            ctx.remove_node(table_node.name)
|
lib/python3.10/site-packages/tf2onnx/custom_opsets/string_ops.py
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
""" tf2onnx mapping functions for string ops using contrib ops domain. """
|
| 4 |
+
import logging
|
| 5 |
+
import numpy as np
|
| 6 |
+
from onnx.onnx_pb import TensorProto
|
| 7 |
+
|
| 8 |
+
from tf2onnx import constants, handler
|
| 9 |
+
from tf2onnx.handler import tf_op
|
| 10 |
+
from tf2onnx import utils
|
| 11 |
+
from tf2onnx.graph_builder import GraphBuilder
|
| 12 |
+
|
| 13 |
+
logger = logging.getLogger(__name__)
|
| 14 |
+
|
| 15 |
+
# pylint: disable=unused-argument,missing-docstring
|
| 16 |
+
|
| 17 |
+
@tf_op(["StringSplit", "StringSplitV2"], domain=constants.CONTRIB_OPS_DOMAIN)
class StringOps:
    """Maps TF StringSplit / StringSplitV2 onto the contrib StringSplit op."""

    @classmethod
    def any_version(cls, opset, ctx, node, **kwargs):
        """Rewrite the node in place for the contrib ops domain.

        The contrib op takes (input, delimiter, skip_empty) as inputs and no
        attributes, so all TF attributes are dropped and skip_empty becomes a
        const input. StringSplitV2 has no skip_empty semantics, hence False.
        """
        if node.type == "StringSplit":
            skip_empty = node.get_attr_value('skip_empty', True)
        else:
            skip_empty = False
        node.type = "StringSplit"
        node.domain = constants.CONTRIB_OPS_DOMAIN
        for a in list(node.attr.keys()):
            del node.attr[a]
        # The delimiter input must be rank 1 for the contrib op.
        unsqueeze_node = GraphBuilder(ctx).make_unsqueeze({'data': node.input[1], 'axes': [0]}, return_node=True)

        # Use the builtin bool dtype: the np.bool alias was removed in
        # NumPy 1.24 and raises AttributeError on modern NumPy.
        skip_empty_const = ctx.make_const(utils.make_name('skip_empty_const'), np.array([skip_empty], dtype=bool))
        ctx.replace_inputs(node, [node.input[0], unsqueeze_node.output[0], skip_empty_const.output[0]])

    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        cls.any_version(1, ctx, node, **kwargs)

    @classmethod
    def version_13(cls, ctx, node, **kwargs):
        cls.any_version(13, ctx, node, **kwargs)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
@tf_op("StringToHashBucketFast", domain=constants.CONTRIB_OPS_DOMAIN)
class StringToHashBucketFast:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Map to the contrib op; the bucket count moves from attr to input."""
        node.domain = constants.CONTRIB_OPS_DOMAIN
        # The contrib op takes num_buckets as a second (int64 const) input
        # instead of an attribute.
        num_buckets = node.get_attr_int('num_buckets')
        num_buckets_const = ctx.make_const(utils.make_name('num_buckets'), np.array([num_buckets], dtype=np.int64))
        ctx.replace_inputs(node, [node.input[0], num_buckets_const.output[0]])
        del node.attr['num_buckets']
|
| 52 |
+
|
| 53 |
+
@tf_op("StaticRegexReplace", domain=constants.CONTRIB_OPS_DOMAIN)
class StaticRegexReplace:
    """Maps TF StaticRegexReplace onto the contrib StringRegexReplace op."""

    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Move the pattern/rewrite attributes into const string inputs.

        The contrib op always replaces globally, so conversion is refused when
        the TF node asked for first-match-only replacement.
        """
        node.domain = constants.CONTRIB_OPS_DOMAIN
        node.type = "StringRegexReplace"
        pattern = node.get_attr_str("pattern")
        rewrite = node.get_attr_str("rewrite")
        utils.make_sure(node.get_attr_value("replace_global") != 0,
                        "Can not convert StaticRegexReplace if replace_global is False")
        # Use the builtin object dtype: the np.object alias was removed in
        # NumPy 1.24 and raises AttributeError on modern NumPy.
        pattern_node = ctx.make_const(utils.make_name("pattern"), np.array([pattern], dtype=object))
        rewrite_node = ctx.make_const(utils.make_name("rewrite"), np.array([rewrite], dtype=object))
        del node.attr["pattern"]
        del node.attr["rewrite"]
        del node.attr["replace_global"]
        ctx.replace_inputs(node, [node.input[0], pattern_node.output[0], rewrite_node.output[0]])
|
| 69 |
+
|
| 70 |
+
@tf_op("StringJoin", domain=constants.CONTRIB_OPS_DOMAIN)
class StringJoin:
    """Maps TF StringJoin onto the contrib StringJoin op."""

    @classmethod
    def any_version(cls, opset, ctx, node, **kwargs):
        """Stack the variadic TF inputs into one tensor for the contrib op.

        The contrib op takes (strings, separator, axis). Each TF input is
        unsqueezed to rank input_rank+1 and concatenated on axis 0; scalar
        inputs are first expanded to the shape of a non-scalar sibling so the
        concat shapes agree.
        """
        node.domain = constants.CONTRIB_OPS_DOMAIN
        separator = node.get_attr_value("separator")
        if separator is None:
            separator = b''
        separator = separator.decode('UTF-8')
        # Use the builtin object dtype: the np.object alias was removed in
        # NumPy 1.24 and raises AttributeError on modern NumPy.
        separator_node = ctx.make_const(utils.make_name("separator"), np.array([separator], dtype=object))
        axis_node = ctx.make_const(utils.make_name("axis"), np.array([0], np.int64))
        inps_with_shapes = [i for i in node.input if ctx.get_shape(i) != []]
        shape_node = None
        # Only needed when scalars are mixed with non-scalars.
        if 0 < len(inps_with_shapes) < len(node.input):
            shape_node = ctx.make_node("Shape", [inps_with_shapes[0]])
        unsqueezes = []
        for inp in node.input:
            if ctx.get_shape(inp) == [] and shape_node is not None:
                # Broadcast the scalar to match the non-scalar inputs.
                expand_node = ctx.make_node("Expand", [inp, shape_node.output[0]])
                inp = expand_node.output[0]
            unsqueeze_node = GraphBuilder(ctx).make_unsqueeze({'data': inp, 'axes': [0]})
            unsqueezes.append(unsqueeze_node)
        stack_node = ctx.make_node("Concat", unsqueezes, attr={'axis': 0})
        ctx.replace_inputs(node, [stack_node.output[0], separator_node.output[0], axis_node.output[0]])

    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        cls.any_version(1, ctx, node, **kwargs)

    @classmethod
    def version_13(cls, ctx, node, **kwargs):
        cls.any_version(13, ctx, node, **kwargs)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
@tf_op(["Equal", "NotEqual"], domain=constants.CONTRIB_OPS_DOMAIN)
class StringEqual:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Use contrib StringEqual for string inputs; delegate otherwise."""
        dtype = ctx.get_dtype(node.input[0])
        if dtype != TensorProto.STRING:
            # Fallback to normal domain conversion
            func, _ = handler.tf_op.find_effective_op(node.type, constants.ONNX_DOMAIN)
            func(ctx, node, **kwargs)
            return

        # The contrib domain only has StringEqual, so NotEqual becomes
        # StringEqual followed by Not.
        need_not = node.type == "NotEqual"
        node.type = "StringEqual"
        node.domain = constants.CONTRIB_OPS_DOMAIN
        if need_not:
            output_name = node.output[0]
            not_node = ctx.insert_new_node_on_output("Not", output_name, name=utils.make_name(node.name))
            ctx.copy_shape(output_name, not_node.output[0])
            ctx.copy_dtype(output_name, not_node.output[0])
|
| 123 |
+
|
| 124 |
+
@tf_op(["StringLower", "StringUpper"])
class StringLower:
    @classmethod
    def version_10(cls, ctx, node, **kwargs):
        """Map StringLower/StringUpper onto the ONNX StringNormalizer op."""
        if node.type == "StringLower":
            case_action = "LOWER"
        else:
            case_action = "UPPER"
        node.type = "StringNormalizer"
        str_input = node.input[0]
        rank = ctx.get_rank(node.input[0])
        shape = ctx.get_shape(node.input[0])
        # StringNormalizer only accepts rank-1 input: flatten first, then
        # reshape the result back to the original shape afterwards.
        if rank != 1:
            ctx.insert_new_node_on_input(node, "Flatten", node.input[0], axis=0)
        node.set_attr("case_change_action", case_action)
        if rank != 1:
            if shape is None or -1 in shape:
                # Unknown/dynamic shape: recover it at runtime via Shape.
                new_shape = ctx.make_node("Shape", [str_input]).output[0]
            else:
                new_shape = ctx.make_const(utils.make_name("shape"), np.array(shape, np.int64)).output[0]
            ctx.insert_new_node_on_output("Reshape", node.output[0], inputs=[node.output[0], new_shape])
|
| 145 |
+
|
| 146 |
+
@tf_op("SentencepieceOp", domain=constants.CONTRIB_OPS_DOMAIN)
class SentencepieceOp:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # This op will be removed when its consumer is converted
        # (see SentencepieceTokenizeOp below).
        pass
|
| 152 |
+
|
| 153 |
+
@tf_op("SentencepieceTokenizeOp", domain=constants.CONTRIB_OPS_DOMAIN)
class SentencepieceTokenizeOp:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Map onto contrib SentencepieceTokenizer, inlining the model blob."""
        node.domain = constants.CONTRIB_OPS_DOMAIN
        input_node = node.inputs[0]
        utils.make_sure(input_node.type == "SentencepieceOp", "Input 0 to node %s is not SentencepieceOp", node.name)
        # The model comes from the SentencepieceOp's attr, not from an input.
        ctx.remove_input(node, node.input[0], 0)

        nbest_size_cast = ctx.make_node("Cast", [node.input[1]], attr={'to': TensorProto.INT64}).output[0]
        ctx.replace_input(node, node.input[1], nbest_size_cast, 1)
        # The contrib op expects the remaining scalar inputs as rank-1 tensors.
        for i in range(1, len(node.input)):
            unsqueeze = GraphBuilder(ctx).make_unsqueeze({'data': node.input[i], 'axes': [0]})
            ctx.replace_input(node, node.input[i], unsqueeze, i)
        node.set_attr("model", input_node.attr['model'].s)
        node.type = "SentencepieceTokenizer"
        # Drop the now-dangling SentencepieceOp if nothing else needs it.
        if ctx.is_safe_to_remove_nodes([input_node]):
            ctx.remove_node(input_node.name)
|
lib/python3.10/site-packages/tf2onnx/onnx_opset/__init__.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
"""tf2onnx.onnx_opset module"""
|
| 4 |
+
|
| 5 |
+
from . import (
|
| 6 |
+
common,
|
| 7 |
+
controlflow,
|
| 8 |
+
generator,
|
| 9 |
+
logical,
|
| 10 |
+
math,
|
| 11 |
+
misc,
|
| 12 |
+
nn,
|
| 13 |
+
quantize,
|
| 14 |
+
reduction,
|
| 15 |
+
rnn,
|
| 16 |
+
signal,
|
| 17 |
+
tensor,
|
| 18 |
+
traditionalml
|
| 19 |
+
)
|
lib/python3.10/site-packages/tf2onnx/onnx_opset/common.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
common
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
from __future__ import unicode_literals
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
|
| 14 |
+
from tf2onnx import constants
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
# pylint: disable=unused-argument,missing-docstring
|
| 20 |
+
|
| 21 |
+
class BroadcastOp:
    """Elementwise ops that carried an explicit 'broadcast' flag before opset 7.

    version_1 and version_6 shared a verbatim copy of the RS4 input-swap
    workaround; it is factored into _rs4_swap_inputs so the logic lives in
    one place.
    """

    @classmethod
    def _rs4_swap_inputs(cls, ctx, node, shape0, shape1):
        """winml/rs4 workaround: swap inputs so the higher-rank one comes first.

        In rs4, Mul and Add do not support scalars correctly; const scalars
        are promoted to dim-1 tensors so their rank can be compared.
        """
        if not shape0 and node.inputs[0].is_const():
            shape0 = node.inputs[0].scalar_to_dim1()
        if not shape1 and node.inputs[1].is_const():
            shape1 = node.inputs[1].scalar_to_dim1()
        if shape0 and shape1 and len(shape0) < len(shape1) and node.type in ["Mul", "Add"]:
            tmp = node.input[0]
            ctx.replace_input(node, node.input[0], node.input[1], 0)
            ctx.replace_input(node, node.input[1], tmp, 1)

    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Elementwise Ops with broadcast flag."""
        if node.type == "AddV2":
            node.type = "Add"
        shape0 = ctx.get_shape(node.input[0])
        shape1 = ctx.get_shape(node.input[1])
        if shape0 != shape1:
            node.set_attr("broadcast", 1)
            # this works around shortcomings in the broadcasting code
            # of caffe2 and winml/rs4.
            if ctx.is_target(constants.TARGET_RS4):
                cls._rs4_swap_inputs(ctx, node, shape0, shape1)
        else:
            node.set_attr("broadcast", 0)

    @classmethod
    def version_6(cls, ctx, node, **kwargs):
        """Elementwise Ops with broadcast flag."""
        # Opset 6+ has implicit numpy-style broadcasting: no attr needed,
        # only the rs4 workaround remains.
        if node.type == "AddV2":
            node.type = "Add"
        shape0 = ctx.get_shape(node.input[0])
        shape1 = ctx.get_shape(node.input[1])
        if shape0 != shape1 and ctx.is_target(constants.TARGET_RS4):
            cls._rs4_swap_inputs(ctx, node, shape0, shape1)
lib/python3.10/site-packages/tf2onnx/onnx_opset/controlflow.py
ADDED
|
@@ -0,0 +1,672 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
controlflow
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
from __future__ import unicode_literals
|
| 11 |
+
|
| 12 |
+
import copy
|
| 13 |
+
import logging
|
| 14 |
+
|
| 15 |
+
import numpy as np
|
| 16 |
+
|
| 17 |
+
from onnx import onnx_pb
|
| 18 |
+
from onnx.onnx_pb import TensorProto
|
| 19 |
+
from tf2onnx import utils
|
| 20 |
+
from tf2onnx.handler import tf_op
|
| 21 |
+
from tf2onnx.tf_loader import find_function
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
logger = logging.getLogger(__name__)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# pylint: disable=unused-argument,missing-docstring
|
| 28 |
+
|
| 29 |
+
def make_range_const(ctx, start, limit, delta, output, scope_name, shape, dtype):
    """make Range subgraph if all inputs are const."""
    # T range = Range(T start, T limit, T delta)
    # V v_final_and_scan_outputs = Loop(int64 M, B cond, V v_initial)
    base_name = utils.make_name(scope_name)
    # All three inputs are const, so the whole range can be computed at
    # conversion time with np.arange and emitted as a single constant.
    start = ctx.get_node_by_output(start).get_tensor_value(as_list=False)
    limit = ctx.get_node_by_output(limit).get_tensor_value(as_list=False)
    delta = ctx.get_node_by_output(delta).get_tensor_value(as_list=False)
    val = np.arange(start, limit, delta, dtype=start.dtype)
    const_range = ctx.make_const(base_name, val)
    # Identity bridges the const to the original output name expected by
    # downstream consumers.
    ctx.make_node("Identity", [const_range.output[0]], shapes=[shape], dtypes=[dtype], outputs=[output])
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
def make_range_non_const(ctx, start, limit, delta, output, scope_name, shape, dtype):
    """make Range subgraph."""
    # T range = Range(T start, T limit, T delta)
    # V v_final_and_scan_outputs = Loop(int64 M, B cond, V v_initial)
    base_name = utils.make_name(scope_name)

    # trip_count: ceil((limit - start) / delta), computed in float so Ceil
    # is applicable, then cast to int64 as Loop requires.
    diff_node = ctx.make_node("Sub",
                              [limit, start],
                              op_name_scope=base_name,
                              name=utils.make_name("diff"))
    diff_output = diff_node.output[0]

    delta_cast = delta
    if dtype in [TensorProto.INT32, TensorProto.INT64]:
        # Integer ranges: cast diff and delta to float for Div/Ceil.
        cast_node = ctx.make_node("Cast", [diff_output], op_name_scope=base_name,
                                  name="cast_diff", attr={"to": TensorProto.FLOAT})
        diff_output = cast_node.output[0]

        cast_node = ctx.make_node("Cast", [delta], op_name_scope=base_name, name="cast_delta",
                                  attr={"to": TensorProto.FLOAT})
        delta_cast = cast_node.output[0]
    div_node = ctx.make_node("Div", [diff_output, delta_cast], op_name_scope=base_name, name="div")
    ceil_node = ctx.make_node("Ceil", [div_node.output[0]], op_name_scope=base_name, name="ceil")
    trip_count_node = ctx.make_node("Cast", [ceil_node.output[0]], op_name_scope=base_name, name="trip_cnt",
                                    attr={"to": TensorProto.INT64})

    # cond
    # Use initializer here since Constant OP before opset 9 does not support bool type
    cond_name = "{}_cond".format(base_name)
    ctx.make_const(cond_name, np.ones((), dtype=bool))

    # body: each iteration emits the current value as a scan output and
    # carries current + delta forward as the loop state.
    g = ctx.create_new_graph_with_same_config()
    g.parent_graph = ctx
    g.add_graph_input("i", TensorProto.INT64, [])
    g.add_graph_input("cond", TensorProto.BOOL, [])
    g.add_graph_input("prev", dtype, [])

    g.make_node("Identity", ["cond"], outputs=["cond_out"])
    g.make_node("Add", ["prev", delta], outputs=["current"], name=utils.make_name("add"))
    g.make_node("Identity", ["prev"], outputs=["range"])

    g.add_graph_output("cond_out", TensorProto.BOOL, [])
    g.add_graph_output("current", dtype, [])
    g.add_graph_output("range", dtype, [])

    # loop: output 0 is the final carried state, output 1 the stacked scan
    # outputs (the range itself).
    loop_inputs = [trip_count_node.output[0], cond_name, start]
    branches = {"body": g}
    loop_node = ctx.make_node("Loop", loop_inputs,
                              output_count=2, op_name_scope=base_name, name="loop", branches=branches)

    ctx.make_node("Identity", [loop_node.output[1]], name=base_name, shapes=[shape], dtypes=[dtype], outputs=[output])
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def make_range(ctx, start, limit, delta, output, scope_name, shape, dtype):
    """Emit a Range either as a precomputed const or as a Loop subgraph.

    Dispatches to make_range_const when start/limit/delta are all const
    (the range can then be computed at conversion time), otherwise to the
    dynamic Loop-based implementation.
    """
    # all() already returns a bool; the former "... is True" comparison was
    # redundant.
    if all(ctx.get_node_by_output(n).is_const() for n in [start, limit, delta]):
        make_range_const(ctx, start, limit, delta, output, scope_name, shape, dtype)
    else:
        make_range_non_const(ctx, start, limit, delta, output, scope_name, shape, dtype)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
@tf_op(["Loop", "Scan"])
class PassThroughOp:
    """TF Loop/Scan map directly onto the ONNX ops of the same name."""

    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        # Nothing to rewrite at opset 7.
        pass

    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # no change needed
        # loop has 1 less mandatory input
        # if = only doc changes
        # scan has 1 less mandatory input and 4 extra attrs
        pass
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
@tf_op("Range")
class Range:
    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        """Range."""
        # T range = Range(T start, T limit, T delta)
        # V v_final_and_scan_outputs = Loop(int64 M, B cond, V v_initial)
        dtype = node.get_attr_int("Tidx")
        shape = node.output_shapes[0]
        utils.make_sure(dtype is not None, "Tidx of %s is None", node.name)
        # Replace the node with either a const or a Loop subgraph that
        # produces the same output name.
        ctx.remove_node(node.name)
        make_range(ctx, node.input[0], node.input[1], node.input[2],
                   node.output[0], node.name, shape, dtype)

    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # opset 11 implements Range op explicitly
        pass
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
@tf_op(["Select", "SelectV2"])
class Select:
    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        """Emulate Select with Cast/Not/Mul/Add (no Where before opset 9)."""
        # T output = Select(bool condition, T x, T y)
        # Select_res = Add(Multiply(Cast(bool condition, T), T x,),
        #                  Multiply(Cast(Not(bool condition), T), T y)).
        # TODO: Fix case where condition is 1-dimensional
        utils.make_sure(len(node.input) > 1, "Select with only condition is not supported.")
        dtype = ctx.get_dtype(node.output[0])
        utils.make_sure(dtype != TensorProto.STRING, "Select with dtype string requires opset 9")

        cond_shape = ctx.get_shape(node.input[0])
        input_shape = ctx.get_shape(node.input[1])
        if input_shape is None:
            input_shape = ctx.get_shape(node.input[2])
        input_rank = len(input_shape) if input_shape is not None else None
        cond_rank = len(cond_shape) if cond_shape is not None else None
        # if cond shape is 1-dimensional while input has higher rank, need to be reshaped to broadcast
        if node.type == "Select" and cond_rank == 1 and input_rank != 1:
            utils.make_sure(input_rank is not None, "input_rank unknown and cond_rank == 1")
            broadcast_shape = [cond_shape[0]] + [1] * (input_rank - 1)
            shape_const = ctx.make_const(utils.make_name(node.name), np.array(broadcast_shape, dtype=np.int64))
            reshape = ctx.make_node("Reshape", [node.input[0], shape_const.output[0]])
            ctx.replace_input(node, node.input[0], reshape.output[0], 0)

        # cond as 1/0 mask selects x; negated mask selects y; sum combines.
        positive_cast = ctx.make_node("Cast", [node.input[0]], name=utils.make_name(node.name),
                                      attr={"to": dtype})
        negative = ctx.make_node("Not", [node.input[0]], name=utils.make_name(node.name))
        negative_cast = ctx.make_node("Cast", [negative.output[0]], name=utils.make_name(node.name),
                                      attr={"to": dtype})
        multiply_1 = ctx.make_node("Mul", [positive_cast.output[0], node.input[1]], name=utils.make_name(node.name))
        multiply_2 = ctx.make_node("Mul", [node.input[2], negative_cast.output[0]], name=utils.make_name(node.name))
        # The final Add reuses the original node's name/outputs so consumers
        # are untouched.
        add_name = node.name
        add_out = node.output
        shape = ctx.get_shape(node.output[0])
        ctx.remove_node(node.name)
        ctx.make_node("Add", [multiply_1.output[0], multiply_2.output[0]], outputs=add_out, name=add_name,
                      dtypes=[dtype], shapes=[shape])

    @classmethod
    def version_9(cls, ctx, node, **kwargs):
        """Map Select onto Where (opset 9+); strings only, else reuse v7."""
        # T output = Select(bool condition, T x, T y)
        # T1 output = Where(bool condition, T1 x, T1 y)
        # NOTE: condition can be 1-dimension in tensorflow, while in onnx,
        # it should be broadcastable with other two inputs
        if ctx.get_dtype(node.output[0]) != TensorProto.STRING:
            # Due to bad ORT implementation, Mul/Add ops are faster than Where op
            cls.version_7(ctx, node, **kwargs)
            return

        cond_shape = ctx.get_shape(node.input[0])
        input_shape = ctx.get_shape(node.input[1])
        if input_shape is None:
            input_shape = ctx.get_shape(node.input[2])
        input_rank = len(input_shape) if input_shape is not None else None
        cond_rank = len(cond_shape) if cond_shape is not None else None
        # if cond shape is 1-dimensional while input has higher rank, need to be reshaped to broadcast
        if node.type == "Select" and cond_rank == 1 and input_rank != 1:
            utils.make_sure(input_rank is not None, "input_rank unknown and cond_rank == 1")
            broadcast_shape = [cond_shape[0]] + [1] * (input_rank - 1)
            shape_const = ctx.make_const(utils.make_name(node.name), np.array(broadcast_shape, dtype=np.int64))
            reshape = ctx.make_node("Reshape", [node.input[0], shape_const.output[0]])
            ctx.replace_input(node, node.input[0], reshape.output[0], 0)
        node.type = "Where"
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
@tf_op("Where")
class Where:
    @classmethod
    def version_9(cls, ctx, node, **kwargs):
        """Lower single-input tf Where (indices of True elements) to NonZero + Transpose."""
        # T_y output = Where(T_x condition), return indices of elements whose value are True
        node.type = "NonZero"
        # onnx NonZero returns one row per axis: [[ind_a_0, ind_a_1, ...], [ind_b_0, ...]];
        # tf returns one row per found element: [[ind_a_0, ind_b_0, ...], ...].
        # A transpose on the output converts between the two layouts.
        permute = ctx.insert_new_node_on_output("Transpose",
                                                node.output[0], name=utils.make_name("where_op_added"))
        ctx.copy_dtype(node.output[0], permute.output[0])
        ctx.copy_shape(node.output[0], permute.output[0])
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
@tf_op(["StatelessIf"])
class StatelessIfOp:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """V2 control flow - If"""
        branch_inputs = node.input[1:]

        shapes = node.output_shapes
        dtypes = node.output_dtypes
        ctx.remove_node(node.name)

        # wire up both branch subgraphs, then rebuild the node as an onnx If
        branches = {}
        for attr_name in ["then_branch", "else_branch"]:
            graph_name = node.get_attr_str(attr_name)
            subgraph = find_function(graph_name)
            subgraph.parent_graph = ctx
            wire_if_branch(ctx, subgraph, branch_inputs, shapes, dtypes, graph_name, node.name)
            branches[attr_name] = subgraph

        # only the condition (input 0) remains an input of the onnx If node
        ctx.make_node("If", node.input[:1], name=node.name, output_count=len(shapes),
                      shapes=shapes, dtypes=dtypes, skip_conversion=True, branches=branches)
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
@tf_op(["If"])
class IfOp:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """V2 control flow - If"""
        branch_inputs = node.input[1:]

        # this comes from the re-writers: an If with no extra inputs is already done
        if node.type == "If" and not branch_inputs:
            return

        shapes = node.output_shapes
        dtypes = node.output_dtypes
        ctx.remove_node(node.name)

        # wire up both branch subgraphs, then rebuild the node as an onnx If
        branches = {}
        for attr_name in ["then_branch", "else_branch"]:
            graph_name = node.get_attr_str(attr_name)
            subgraph = find_function(graph_name)
            subgraph.parent_graph = ctx
            wire_if_branch(ctx, subgraph, branch_inputs, shapes, dtypes, graph_name, node.name)
            branches[attr_name] = subgraph

        # keep the original output names so downstream consumers stay wired
        ctx.make_node("If", node.input[:1], name=node.name, output_count=len(shapes),
                      shapes=shapes, dtypes=dtypes, outputs=node.output, skip_conversion=True,
                      branches=branches)
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
@tf_op(["TensorListSetItem"])
class TensorListSetItem:
    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        # Intentionally a no-op: the 'While' handler rewrites TensorListSetItem
        # nodes inside the loop body (see wire_while_body).
        pass
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
@tf_op(["TensorListGetItem"])
class TensorListGetItem:
    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        """Lower a tensor-list read to Gather, recording the read for the While handler."""
        tensor_array, index = node.input[0], node.input[1]
        # remember which tensor arrays are read from; consumed later (see ta_reads use in 'While')
        ctx.ta_reads.append(tensor_array)
        node.type = "Gather"
        # drop any trailing inputs; Gather takes exactly (data, indices)
        ctx.replace_inputs(node, [tensor_array, index])

    @classmethod
    def version_13(cls, ctx, node, **kwargs):
        # identical lowering as opset 7
        cls.version_7(ctx, node, **kwargs)
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
@tf_op(["TensorListLength"])
class TensorListLength:
    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        # Intentionally a no-op; presumably eliminated as part of the loop
        # rewriting elsewhere (see the 'While' handler) — TODO confirm.
        pass
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
@tf_op(["TensorListReserve", "TensorListResize"])
class TensorListReserve:
    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        # Intentionally a no-op: the 'While' handler removes these nodes itself
        # (onnx has no equivalent of reserving a tensor list).
        pass
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
@tf_op(["TensorListFromTensor"])
class TensorListFromTensor:
    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        """Drop the tensor-list wrapper when it only feeds a While loop.

        The loop conversion consumes the raw tensor directly, so the node
        becomes an Identity and dtype/shape are forwarded from input to output.
        """
        consumers = ctx.find_output_consumers(node.output[0])
        # idiom fix: any() with a generator — no need to materialize a list
        if any(c.is_while() for c in consumers):
            node.type = "Identity"
            ctx.copy_dtype(node.input[0], node.output[0])
            ctx.copy_shape(node.input[0], node.output[0])
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
@tf_op(["TensorListStack"])
class TensorListStack:
    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        """If the list comes straight from a While loop, stacking is redundant — bypass it."""
        producer = node.inputs[0]
        if producer.is_while():
            ctx.remove_node(node.name)
            # re-point every consumer of our output directly at the loop output
            ctx.replace_all_inputs(node.output[0], node.input[0])  # ops=ctx.get_nodes()
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
@tf_op(["While", "StatelessWhile"])
class While:
    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        """Convert a tf V2 While/StatelessWhile into an onnx Loop.

        Rewrites the input/output signature (tf: loop_counter, max_iterations,
        loop_vars...; onnx: max_iterations, cond, loop_vars...), inlines the
        'cond' function into the main graph and the loop body, turns
        TensorListReserve-backed accumulators into onnx scan outputs, and
        moves values read-only in the body into outer-context state vars.
        """
        # the tensorflow while input is:
        #   loop_counter, max_iterations, [loop_vars]
        # cond and body use the same inputs
        # outputs are identical to inputs
        tf_while_inputs = node.input

        # the onnx loop input is:
        #   max_iterations, cond, [loop_vars]
        # body uses the inputs:
        #   iteration, cond, [loop_vars]
        # the onnx loop output is:
        #   cond [v_final_and_scan_outputs]

        output_shapes = node.output_shapes
        output_dtypes = node.output_dtypes
        # node.output must be copied as some element
        # may be removed from output_names below
        output_names = node.output.copy()

        # Make maximum_iterations int64 and replace -1(tf) with maxsize(onnx). If the const node has no other
        # consumers, modify it in place. Otherwise, make a new const node and leave the original unchanged.
        # if maximum_iterations is not const, add a Cast node (cast to int64)
        maximum_iterations_name = node.input[1]
        if node.inputs[1].is_const():
            maximum_iterations = node.inputs[1].get_tensor_value()
            if maximum_iterations == -1:
                # tf uses -1 for "unbounded"; onnx expects a (large) positive count
                maximum_iterations = np.iinfo(np.int64).max
            consumers = ctx.find_output_consumers(maximum_iterations_name)
            external_consumers = [c for c in consumers if c != node and c.type != 'TensorListReserve']
            if len(external_consumers) == 0:
                ctx.remove_node(node.inputs[1].name)
            else:
                maximum_iterations_name = utils.make_name(node.inputs[1].name)
            ctx.make_const(maximum_iterations_name, np.array(maximum_iterations, dtype=np.int64))
            ctx.replace_input(node, node.input[1], maximum_iterations_name, 1)
            maximum_iterations_int64 = maximum_iterations_name
        else:
            cast_inputs = [maximum_iterations_name]
            attr = {"to": onnx_pb.TensorProto.INT64}
            cast_name = node.name + "_cast"
            cast_node = ctx.make_node("Cast", cast_inputs, attr, name=cast_name)
            maximum_iterations_int64 = cast_node.output[0]

        cond_name = node.get_attr_str("cond")
        cond_graph = find_function(cond_name)
        cond_graph.parent_graph = ctx

        body_name = node.get_attr_str("body")
        body = find_function(body_name)
        body.parent_graph = ctx

        loop_vars = []  # passed into the loop
        body_input_to_state_var = {}  # Map from body input name to state var name
        cond_input_to_state_var = {}
        to_remove = []  # (idx, node) pairs for TensorListReserve/Resize inputs
        input_idx_to_remove = []
        # remove TensorListReserve
        for idx, name in enumerate(tf_while_inputs):
            if idx == 1:
                # onnx does not know maximum_iterations in the body so move this to a state var
                body_input_to_state_var[body.input_names[idx]] = maximum_iterations_name
                cond_input_to_state_var[cond_graph.input_names[idx]] = maximum_iterations_name
                continue
            if idx < 2:
                # skip [0,1] loop_counter, max_iterations
                continue
            n = node.inputs[idx]
            if n.type in ["TensorListReserve", "TensorListResize"]:
                # there is no equivalent step in onnx and we should remove it.
                to_remove.append((idx, n))
                continue

            # tensor arrays we read from can't be loop_vars and we fetch them from the outer context instead
            if body.input_names[idx] in body.ta_reads:
                body_input_to_state_var[body.input_names[idx]] = name
                cond_input_to_state_var[cond_graph.input_names[idx]] = name
                input_idx_to_remove.append(idx)
            else:
                loop_vars.append(name)

        # loop_vars that become state_vars need to be removed from output as well
        # (reversed so earlier deletions don't shift later indices)
        for idx in reversed(input_idx_to_remove):
            del output_shapes[idx]
            del output_dtypes[idx]
            del output_names[idx]
            del body.outputs[idx]

        scan_output_names = []
        # remove tensor array that are passed in to the loop;
        # their outputs move to the end of the lists as scan outputs
        for idx, n in reversed(to_remove):
            ctx.remove_node(n.name)
            # make the node output bad
            ctx.replace_all_inputs(n.output[0], "@@ALLOC")  # ops=ctx.get_nodes()
            del body.inputs[idx]
            del cond_graph.inputs[idx]
            del tf_while_inputs[idx]
            scan_output_names.append(body.outputs[idx])
            del body.outputs[idx]
            output_shapes.append(output_shapes[idx])
            output_dtypes.append(output_dtypes[idx])
            output_names.append(output_names[idx])
            del output_shapes[idx]
            del output_dtypes[idx]
            del output_names[idx]

        ctx.remove_node(node.name)

        # In onnx 'cond' is a variable, not a function. We need to inject the subgraph into the main graph
        # before the loop and into the body.
        cond_binding = parameter_binding(cond_graph, tf_while_inputs)
        cond_outputs = inline_subgraph(ctx, cond_graph, cond_name, cond_binding)
        # onnx Loop op outputs only loop_vars so we need shift output dtypes/shapes and consumers
        output_shapes = output_shapes[2:]
        output_dtypes = output_dtypes[2:]
        output_names = output_names[2:]

        branches = {"body": body}
        loop_node = ctx.make_node("Loop", [maximum_iterations_int64, cond_outputs[0]] + loop_vars,
                                  output_count=len(output_shapes), name=node.name + "_loop",
                                  shapes=output_shapes, dtypes=output_dtypes, skip_conversion=True,
                                  branches=branches)

        output_map = dict(zip(output_names, loop_node.output))

        # shift output consumers
        for k, v in output_map.items():
            ctx.replace_all_inputs(k, v)  # ops=ctx.get_nodes()

        wire_while_body(ctx, body, loop_node.inputs, body_input_to_state_var, cond_input_to_state_var, output_shapes,
                        output_dtypes, body_name, node.name, cond_graph, tf_while_inputs, scan_output_names)

        # if there was a tensorflow variant type, bind in a real type here
        # FIXME: I don't think this is needed anymore
        for i, n in enumerate(body.inputs):
            if body.get_dtype(n.output[0]) == onnx_pb.TensorProto.UNDEFINED:
                body.set_dtype(n.output[0], ctx.get_dtype(loop_node.input[i]))
|
| 470 |
+
|
| 471 |
+
|
| 472 |
+
def wire_while_body(parent_g, g, loop_node_inputs, body_input_to_state_var, cond_input_to_state_var, output_shapes,
                    output_dtypes, scope, parent, cond_graph, tf_while_inputs, scan_output_names):
    """Wire the loop-body subgraph *g* into shape for the onnx Loop op.

    Converts state-var inputs to Identity nodes, prepends the onnx-mandated
    (iteration, cond) inputs, turns TensorListSetItem writes into scan
    outputs, prunes the dead tensor-array plumbing, and inlines the cond
    subgraph so the body's first output is the continuation condition.
    Returns the mutated *g*.
    """
    remove_parents = []
    to_remove = []

    # tensorflow function inputs that are state_vars come from outer context and
    # we need to remove them from the inputs by making the placeholder an identity
    for n in g.inputs:
        if n.output[0] in body_input_to_state_var:
            n.type = "Identity"
            g.replace_inputs(n, [body_input_to_state_var[n.output[0]]])

    # onnx will pass in cond as argument
    cond_node = g.make_node("Placeholder", [], name=utils.make_name("cond"),
                            output_count=1, dtypes=[onnx_pb.TensorProto.BOOL], shapes=[[]])

    # in onnx the body inputs are: index, cond, [loop_vars]
    func_inputs = [i for i in g.input_names[2:] if i not in body_input_to_state_var]
    func_inputs = [g.input_names[0], cond_node.output[0]] + func_inputs
    g.set_dtype(func_inputs[0], onnx_pb.TensorProto.INT64)
    g.inputs = [g.get_node_by_output(inp) for inp in func_inputs]

    # propagate shapes from the outer Loop node's inputs onto the body inputs
    for p, c in zip(loop_node_inputs, func_inputs):
        shape = p.output_shapes[0]
        g.set_shape(c, shape)

    # inputs no longer in the body signature get their producer chains removed below
    for i, node in enumerate(g.inputs):
        if node.output[0] not in func_inputs:
            remove_parents.append(node.output[0])

    # this is a tensor array write - make it an identity
    scan_outputs = []
    for node in g.get_nodes():
        if node.type == "TensorListSetItem":
            remove_parents.append(node.input[0])
            node.type = "Identity"
            # input[2] is the written value; it becomes the scan output element
            g.set_shape(node.output[0], g.get_shape(node.input[2]))
            g.set_dtype(node.output[0], g.get_dtype(node.input[2]))
            g.replace_inputs(node, [node.input[2]])
            scan_outputs.append(node.output[0])

    if len(scan_outputs) != len(scan_output_names):
        raise ValueError("While loop couldn't find scan output index for nodes")

    # follow Identity chains from each write to the graph output that names it
    names_to_scan_outputs = {}
    for output in scan_outputs:
        last_output = output
        consumers = g.find_output_consumers(last_output)
        while consumers:
            node = consumers[0]
            if node.type != "Identity":
                raise ValueError("While loop couldn't find scan output index for node " + node.name)
            last_output = node.output[0]
            consumers = g.find_output_consumers(last_output)
        if last_output not in scan_output_names:
            raise ValueError("While loop couldn't find scan output index for node " + node.name)
        names_to_scan_outputs[last_output] = output

    # Reorder scan outputs
    scan_outputs = [names_to_scan_outputs[name] for name in scan_output_names]

    # remove all nodes feeding to TensorListSetItem's reserved tensor
    # (worklist walk up the producer chain, stopping at real body inputs)
    while remove_parents:
        output_name = remove_parents[0]
        del remove_parents[0]
        node = g.get_node_by_output(output_name)
        if node:
            if output_name not in func_inputs:
                if node.input:
                    remove_parents.extend(node.input)
                g.remove_node(node.name)

    for node in to_remove:
        g.remove_node(node.name)

    # inline the cond subgraph into the body so the loop condition is recomputed per iteration
    cond_binding = parameter_binding(cond_graph, func_inputs[:1] + g.outputs[2:], cond_input_to_state_var)
    cond_outputs = inline_subgraph(g, cond_graph, "cond__", cond_binding)

    # onnx body outputs: cond, [loop_vars], [scan_outputs]
    g.outputs = [cond_outputs[0]] + g.outputs[2:] + scan_outputs

    # FIXME: onnx does not have a variant type so we try to fish for the dtype in a prior TensorListSetItem.
    for o in g.outputs:
        if g.get_dtype(o) == onnx_pb.TensorProto.UNDEFINED:
            node = g.get_node_by_output(o)
            if node.type in ["Identity"]:
                g.set_dtype(o, node.inputs[0].output_dtypes[0])

    return g
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
def wire_if_branch(parent_g, g, inputs, output_shapes, output_dtypes, scope, parent):
    """Wire subgraph graph into main."""
    binding = parameter_binding(g, inputs)
    dangling = []
    for placeholder in g.inputs:
        bound_name = binding.get(placeholder.output[0])
        if not bound_name or bound_name == "@@ALLOC":
            # nothing in the outer graph maps to this input; drop it
            dangling.append(placeholder)
        else:
            g.replace_inputs(placeholder, [bound_name])
            placeholder.type = "Identity"

    for placeholder in dangling:
        g.remove_node(placeholder.name)

    prefix_graph(g, scope)

    # stamp the outer node's output shapes/dtypes onto the branch outputs
    for out_name, out_shape, out_dtype in zip(g.outputs, output_shapes, output_dtypes):
        g.set_shape(out_name, out_shape)
        g.set_dtype(out_name, out_dtype)

    return g
|
| 585 |
+
|
| 586 |
+
|
| 587 |
+
def inline_subgraph(parent, g, scope, binding):
    """Splice a deep copy of subgraph *g* into *parent* under name scope *scope*.

    *binding* maps the subgraph's input names to tensor names in *parent*;
    bound inputs become Identity nodes, unbound ones are deleted.
    Returns the (copied) subgraph's output names, now valid in *parent*.
    """
    # make a copy since we don't want to change the original graph
    g = copy.deepcopy(g)
    to_remove = []
    for node in g.inputs:
        parent_name = binding.get(node.output[0])
        if parent_name and parent_name != "@@ALLOC":
            g.replace_inputs(node, [parent_name])
            node.type = "Identity"
        else:
            to_remove.append(node)
    for node in to_remove:
        g.remove_node(node.name)
    prefix_graph(g, scope)
    # re-home every node into the parent graph and register its shapes/dtypes there
    for n in g.get_nodes():
        dtypes = n.output_dtypes
        shapes = n.output_shapes
        n.graph = parent
        for name, shape, dtype in zip(n.output, shapes, dtypes):
            # FIXME: don't access this directly
            parent._output_shapes[name] = shape  # pylint: disable=protected-access
            parent._dtypes[name] = dtype  # pylint: disable=protected-access

    ops = parent.get_nodes() + g.get_nodes()
    parent.reset_nodes(ops)

    # copy output shape and dtype to parent graph
    for name in g.outputs:
        parent.set_dtype(name, g.get_dtype(name))
        parent.set_shape(name, g.get_shape(name))

    return g.outputs
|
| 619 |
+
|
| 620 |
+
|
| 621 |
+
def parameter_binding(g, inputs, state_vars=None):
    """Map each formal input name of *g* to a state var or the next actual input.

    Raises via utils.make_sure when the number of non-state-var formals does
    not match len(inputs).
    """
    mapping = {}
    consumed = 0
    for formal in g.input_names:
        if state_vars and formal in state_vars:
            mapping[formal] = state_vars[formal]
        else:
            mapping[formal] = inputs[consumed]
            consumed += 1
    utils.make_sure(consumed == len(inputs), "Parameter count mismatch while binding controlflow")
    return mapping
|
| 632 |
+
|
| 633 |
+
|
| 634 |
+
def prefix_graph(g, scope):
    """Recreate every non-input node of *g* under the name scope *scope*.

    Each node is rebuilt via make_node with op_name_scope=scope (carrying over
    attrs, shapes, dtypes and body graphs), consumers and graph outputs are
    re-pointed at the new outputs, and the old nodes are removed.
    """
    # snapshot the node list: we append replacements while iterating
    ops = g.get_nodes()[:]
    to_remove = []
    for node in ops:
        output_shapes = node.output_shapes
        output_dtypes = node.output_dtypes
        attr = node.attr
        # graph inputs keep their names; only interior nodes are prefixed
        if node.is_graph_input():
            continue
        branches = {}
        attr_graphs = node.get_body_graphs()
        if attr_graphs:
            for k, v in attr_graphs.items():
                branches[k] = v
        new_node = g.make_node(node.type, node.input, name=node.name, output_count=len(node.output),
                               shapes=output_shapes, dtypes=output_dtypes, attr=attr,
                               op_name_scope=scope, skip_conversion=True, branches=branches)
        for old_output, new_output in zip(node.output, new_node.output):
            # keep the graph's declared outputs pointing at the renamed tensors
            for i, oname in enumerate(g.outputs):
                if old_output == oname:
                    g.outputs[i] = new_output
                    break
            g.replace_all_inputs(old_output, new_output, ops=ops)
        to_remove.append(node)
    for node in to_remove:
        g.remove_node(node.name)
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
def dump_graph(g):
    """Print a human-readable summary of graph *g*: name, inputs, outputs and nodes."""
    def _fmt(tensor_name):
        # "name shape/dtype" for a tensor name
        return "{} {}/{}".format(tensor_name, g.get_shape(tensor_name), g.get_dtype(tensor_name))

    print()
    print("--, graph=", g.graph_name)
    input_descs = ["{} {}/{}".format(n.name, g.get_shape(n.output[0]), g.get_dtype(n.output[0]))
                   for n in g.inputs]
    print("--, inputs=", ", ".join(input_descs))
    print("--, outputs=", ", ".join(_fmt(o) for o in g.outputs))
    for node in g.get_nodes():
        ins = ", ".join(_fmt(i) for i in node.input)
        outs = ", ".join(_fmt(o) for o in node.output)
        print("-- {} n={} i={} o={}".format(node.type, node.name, ins, outs))
|
lib/python3.10/site-packages/tf2onnx/onnx_opset/generator.py
ADDED
|
@@ -0,0 +1,254 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
generator
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
from __future__ import unicode_literals
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
|
| 14 |
+
import numpy as np
|
| 15 |
+
from onnx import onnx_pb, numpy_helper
|
| 16 |
+
from tf2onnx import utils
|
| 17 |
+
from tf2onnx.handler import tf_op
|
| 18 |
+
from tf2onnx.graph_builder import GraphBuilder
|
| 19 |
+
|
| 20 |
+
logger = logging.getLogger(__name__)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
# pylint: disable=unused-argument,missing-docstring
|
| 24 |
+
|
| 25 |
+
@tf_op(["Const", "ConstV2"])
class DirectOp:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # Intentionally a no-op: constants need no rewriting here.
        pass
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
@tf_op(["RandomNormal", "RandomUniform", "RandomUniformInt"])
class RandomOp:
    @classmethod
    def randuniform_int(cls, ctx, rand_node, rand_out, min_inp, max_inp):
        """Rescale a [0,1) RandomUniform output into the integer range [min, max).

        When both bounds are const they become the op's low/high attrs;
        otherwise the output is scaled with Mul/Add, floored and cast back
        to the original output dtype.
        """
        dtype = ctx.get_dtype(rand_out)
        min_node = ctx.get_node_by_output(min_inp)
        max_node = ctx.get_node_by_output(max_inp)
        if min_node.is_const() and max_node.is_const():
            # both bounds known: bake them into the RandomUniform node itself
            rand_node.set_attr('low', float(min_node.get_tensor_value()))
            rand_node.set_attr('high', float(max_node.get_tensor_value()))
            out = rand_out
        elif min_node.is_const() and min_node.get_tensor_value() == 0:
            # min == 0: a single multiply by max suffices
            max_float = ctx.make_node("Cast", [max_inp], attr={'to': onnx_pb.TensorProto.FLOAT}).output[0]
            mul_node = ctx.insert_new_node_on_output("Mul", rand_out, inputs=[rand_out, max_float])
            out = mul_node.output[0]
        else:
            # general case: out = rand * (max - min) + min
            min_float = ctx.make_node("Cast", [min_inp], attr={'to': onnx_pb.TensorProto.FLOAT}).output[0]
            max_float = ctx.make_node("Cast", [max_inp], attr={'to': onnx_pb.TensorProto.FLOAT}).output[0]
            diff = ctx.make_node("Sub", [max_float, min_float]).output[0]
            diff_float = ctx.make_node("Cast", [diff], attr={'to': onnx_pb.TensorProto.FLOAT}).output[0]
            mul_node = ctx.insert_new_node_on_output("Mul", rand_out, inputs=[rand_out, diff_float])
            mul = mul_node.output[0]
            add_node = ctx.insert_new_node_on_output("Add", mul, inputs=[mul, min_float])
            out = add_node.output[0]
        # truncate to integers, then restore the requested output dtype
        floor_node = ctx.insert_new_node_on_output("Floor", out)
        ctx.insert_new_node_on_output("Cast", floor_node.output[0], to=dtype)

    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Convert tf random ops whose shape input is const into attribute form."""
        # in tf-2.0 grappler optimizes the graph pretty well and our matching logic
        # in the rewriter does not trigger. grappler will send the random uniform
        # with shape as input so we need to pickup the input here and if the shape is
        # const we make it an attribute.
        seed = node.get_attr("seed")
        node.set_attr("seed", float(seed.f))
        # BUG FIX: the "%s" placeholder was never supplied an argument, so the
        # error message printed a literal "%s"; pass node.type to fill it in.
        utils.make_sure(node.inputs[0].is_const(),
                        "%s node with non-const shape requires opset >= 9", node.type)
        shape = node.inputs[0].get_tensor_value()
        ctx.remove_input(node, node.input[0], 0)
        if len(shape) == 0:
            # ORT can't take an empty shape (scalar)
            node.set_attr("shape", [1])
            ctx.set_shape(node.output[0], [1])
            squeeze_node = GraphBuilder(ctx).make_squeeze({'data': node.output[0], 'axes': [0]}, return_node=True)
            ctx.insert_node_on_output(squeeze_node, node.output[0])
            rand_out = squeeze_node.output[0]
        else:
            node.set_attr("shape", shape)
            ctx.set_shape(node.output[0], shape)
            rand_out = node.output[0]
        if node.type == "RandomUniformInt":
            cls.randuniform_int(ctx, node, rand_out, node.input[0], node.input[1])
            node.type = "RandomUniform"
        ctx.replace_inputs(node, [])

    @classmethod
    def version_9(cls, ctx, node, **kwargs):
        """Opset 9+: non-const shapes are handled via ConstantOfShape + *Like ops."""
        if node.inputs[0].is_const():
            cls.version_1(ctx, node, **kwargs)
        else:
            seed = node.get_attr("seed")
            node.set_attr("seed", float(seed.f))
            # build a dummy tensor of the requested (dynamic) shape, then sample "like" it
            cast_node = ctx.make_node("Cast", [node.input[0]], attr={'to': onnx_pb.TensorProto.INT64})
            const_node = ctx.make_node("ConstantOfShape", cast_node.output)
            inputs = node.input.copy()
            ctx.replace_inputs(node, const_node.output.copy())
            if node.type == "RandomUniformInt":
                cls.randuniform_int(ctx, node, node.output[0], inputs[1], inputs[2])
                node.type = "RandomUniformLike"
            else:
                node.type = node.type + 'Like'
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
@tf_op(["RandomNormalLike", "RandomUniformLike"])
class PassThroughOp:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # Intentionally a no-op: these nodes pass through without rewriting.
        pass
|
| 109 |
+
|
| 110 |
+
@tf_op("Fill")
class Fill:
    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        """Opset 7: lower Fill to Tile of a value unsqueezed to the target rank.

        Requires a statically known, rank>=1 fill shape.  For non-float values
        on opset < 7+cast-limited ORT, the value is routed through FLOAT and
        cast back afterwards.
        """
        # T output = Fill(int32 dims, T value, @int32 index_type)
        # T outputs = Tile(T value, int64 repeats (e.g. dims))
        fill_shape = ctx.get_shape(node.input[0])
        utils.make_sure(fill_shape is not None, "shape of {} is None".format(node.input[0]))
        fill_shape_dims = fill_shape[0]
        utils.make_sure(fill_shape_dims > 0, "opset 7 requires fill shape length > 0, or please try opset > 7")
        val_dtype = ctx.get_dtype(node.input[1])
        val_shape = ctx.get_shape(node.input[1])

        # pre-opset-9 Tile path works on FLOAT; cast non-float values there and back
        need_cast = val_dtype != onnx_pb.TensorProto.FLOAT and ctx.opset < 9
        new_dtype = val_dtype
        if need_cast:
            new_dtype = onnx_pb.TensorProto.FLOAT
            attr = {"to": new_dtype}
            cast_to_float = ctx.insert_new_node_on_input(node, "Cast", node.input[1], name=None, **attr)
            ctx.set_dtype(cast_to_float.output[0], new_dtype)
            ctx.set_shape(cast_to_float.output[0], val_shape)

        # unsqueeze the scalar value once per target dimension so Tile can expand it
        for _ in range(fill_shape_dims):
            attr = {"axes": [0]}
            shape = ctx.get_shape(node.input[1])
            unsqueeze_node = ctx.insert_new_node_on_input(node, "Unsqueeze", node.input[1], name=None, **attr)
            ctx.set_dtype(unsqueeze_node.output[0], new_dtype)
            if shape:
                shape = [1] + shape
            else:
                shape = [1]
            ctx.set_shape(unsqueeze_node.output[0], shape)

        # Tile's repeats must be INT64
        attr = {"to": onnx_pb.TensorProto.INT64}
        tile_shape_int64 = ctx.insert_new_node_on_input(node, "Cast", node.input[0], name=None, **attr)
        ctx.set_dtype(tile_shape_int64.output[0], onnx_pb.TensorProto.INT64)
        ctx.set_shape(tile_shape_int64.output[0], fill_shape)

        # swap inputs: Tile takes (value, repeats) while Fill takes (dims, value)
        tmp = node.input[0]
        ctx.replace_input(node, node.input[0], node.input[1], 0)
        ctx.replace_input(node, node.input[1], tmp, 1)
        node.type = "Tile"
        ctx.set_dtype(node.output[0], new_dtype)

        if need_cast:
            attr = {"to": val_dtype}
            op_name = utils.make_name(node.name + "/cast_back")
            cast_back = ctx.insert_new_node_on_output("Cast", node.output[0], name=op_name, **attr)
            ctx.set_dtype(cast_back.output[0], val_dtype)

    @classmethod
    def version_9(cls, ctx, node, **kwargs):
        """Opset 9: lower Fill to ConstantOfShape with the fill value as attribute."""
        node.type = "ConstantOfShape"
        # both shape and value in tensorflow are passed as tensor.
        # In onnx the value is an attribute so we need to fetch the value as const which
        # sooner or later will be a problem for tensorflow-onnx.
        # ConstantOfShape in onnxruntime only support int64, so insert cast op
        input_dtype_is_int64 = utils.map_onnx_to_numpy_type(ctx.get_dtype(node.input[0])) == np.int64
        if not input_dtype_is_int64:
            ctx.insert_new_node_on_input(node, "Cast", node.input[0], to=onnx_pb.TensorProto.INT64)
        dtype = ctx.get_dtype(node.output[0])
        value = np.array([node.inputs[1].get_tensor_value()]).astype(utils.map_onnx_to_numpy_type(dtype))
        value_proto = numpy_helper.from_array(value)
        node.set_attr("value", value_proto)
        ctx.remove_input(node, node.input[1], 1)

    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        """Opset 11: lower Fill to Expand(value, shape); supports dynamic shapes."""
        node.type = "Expand"
        # Expand takes (input, shape) — the reverse of Fill's (dims, value)
        ctx.replace_inputs(node, [node.input[1], node.input[0]])
        # cast shape to int64 if needed
        if ctx.get_dtype(node.input[1]) != onnx_pb.TensorProto.INT64:
            ctx.insert_new_node_on_input(node, "Cast", node.input[1], to=onnx_pb.TensorProto.INT64)
+
|
| 186 |
+
|
| 187 |
+
@tf_op("Multinomial")
class Multinomial:
    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        """Map tf Multinomial onto onnx Multinomial.

        tf: output_dtype output = Multinomial(T logits, int32 num_samples,
            @int seed, @int seed2, @type output_dtype)
        onnx carries sample_size / seed / dtype as attributes instead.
        """
        num_samples = node.inputs[1].get_tensor_value()
        seed_attr = node.get_attr("seed")
        if seed_attr:
            # onnx expects the seed attribute as a float
            node.set_attr("seed", float(seed_attr.i))
        dtype_attr = node.get_attr("output_dtype")
        out_dtype = dtype_attr.i if dtype_attr else onnx_pb.TensorProto.INT32
        node.set_attr("dtype", out_dtype)
        node.set_attr("sample_size", num_samples)
        # sample count now lives in the attribute; drop the tensor input
        ctx.remove_input(node, node.input[1], 1)
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
@tf_op("ZerosLike")
class ZerosLike:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Rewrite tf ZerosLike(x) as Cast(Mul(Cast(x, int64), 0), dtype_of_x)."""
        out_shapes = node.output_shapes
        out_dtypes = node.output_dtypes
        ctx.remove_node(node.name)
        # go through int64 and multiply by zero to get a zeros tensor of the
        # same shape, then cast back to the original dtype
        as_int64 = ctx.make_node("Cast", node.input, attr={'to': onnx_pb.TensorProto.INT64})
        zero = ctx.make_const(utils.make_name("zero"), np.array(0).astype(np.int64))
        zeroed = ctx.make_node('Mul', inputs=[as_int64.output[0], zero.output[0]])
        # final cast reuses the original node name/outputs so consumers are untouched
        ctx.make_node("Cast", inputs=[zeroed.output[0]],
                      attr={'to': out_dtypes[0]},
                      name=node.name, outputs=node.output,
                      shapes=out_shapes, dtypes=out_dtypes)
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
@tf_op(["IteratorV2", "FIFOQueueV2"])
class Iterator:
    @classmethod
    def version_8(cls, ctx, node, **kwargs):
        # The iterator/queue handle has no onnx equivalent; its consumers
        # (IteratorGetNext / QueueDequeue*) are converted to graph inputs by
        # their own handlers, so the producer node is simply dropped.
        ctx.remove_node(node.name)
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
@tf_op(["IteratorGetNext", "QueueDequeueV2"])
class IteratorGetNext:
    @classmethod
    def version_8(cls, ctx, node, **kwargs):
        """Replace a dataset/queue dequeue op by one graph input per output.

        Previously this handled exactly two outputs (indices 0 and 1),
        unlike the sibling QueueDequeueManyV2 handler which loops; generalize
        to any number of element components (identical behavior for two).
        """
        output_names = node.output.copy()  # remove_node alters the list
        # fetch dtypes/shapes up front — they are gone once the node is removed
        dtypes = [ctx.get_dtype(out) for out in output_names]
        shapes = [ctx.get_shape(out) for out in output_names]
        ctx.remove_node(node.name)
        for out, dtype, shape in zip(output_names, dtypes, shapes):
            ctx.add_graph_input(out, dtype, shape)
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
@tf_op(["QueueDequeueManyV2", "QueueDequeueUpToV2"])
class QueueDequeueManyV2:
    @classmethod
    def version_8(cls, ctx, node, **kwargs):
        """Replace a batched queue-dequeue op by one graph input per output tensor."""
        out_names = node.output.copy()  # remove_node mutates node.output
        out_shapes = node.output_shapes
        out_dtypes = node.output_dtypes
        ctx.remove_node(node.name)
        for out_name, out_dtype, out_shape in zip(out_names, out_dtypes, out_shapes):
            ctx.add_graph_input(out_name, out_dtype, out_shape)
|
lib/python3.10/site-packages/tf2onnx/onnx_opset/logical.py
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
logical
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
from __future__ import unicode_literals
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
|
| 14 |
+
from onnx import TensorProto
|
| 15 |
+
from tf2onnx import utils
|
| 16 |
+
from tf2onnx.handler import tf_op
|
| 17 |
+
from tf2onnx.onnx_opset import common
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
logger = logging.getLogger(__name__)
|
| 21 |
+
|
| 22 |
+
# pylint: disable=unused-argument,missing-docstring
|
| 23 |
+
|
| 24 |
+
def _add_cast_to_inputs(graph, node, supported_dtypes, target_dtype):
    """Cast all of *node*'s inputs to *target_dtype* unless every input
    already has a dtype in *supported_dtypes* (all-or-nothing)."""
    all_supported = all(graph.get_dtype(inp) in supported_dtypes for inp in node.input)
    if not all_supported:
        for inp in node.input:
            cast_node = graph.insert_new_node_on_input(node, "Cast", inp, to=target_dtype)
            graph.copy_shape(inp, cast_node.output[0])
            graph.set_dtype(cast_node.output[0], target_dtype)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def _add_cast_to_same_type_to_inputs(graph, node):
    """Cast inputs [1:] of *node* to the dtype of input 0 where they differ,
    so ops that require uniform operand types (e.g. Equal) are satisfied."""
    common_dtype = graph.get_dtype(node.input[0])
    mismatched = (inp for inp in node.input[1:] if graph.get_dtype(inp) != common_dtype)
    for inp in mismatched:
        cast_node = graph.insert_new_node_on_input(node, "Cast", inp, to=common_dtype)
        graph.copy_shape(inp, cast_node.output[0])
        graph.set_dtype(cast_node.output[0], common_dtype)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@tf_op("LogicalNot", onnx_op="Not")
class DirectOp:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # tf LogicalNot maps 1:1 onto onnx Not; the rename happens via the
        # decorator's onnx_op, so there is nothing to rewrite here.
        pass
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
@tf_op("LogicalAnd", onnx_op="And")
@tf_op("LogicalOr", onnx_op="Or")
class BroadcastOp(common.BroadcastOp):
    # And/Or only need the generic broadcast handling from common.BroadcastOp;
    # the op renames are done by the decorators.
    pass
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
@tf_op(["Equal", "NotEqual"])
class Equal:
    """Handler for tf Equal/NotEqual. NotEqual is expressed as Not(Equal)."""

    @classmethod
    def _wrap_with_not(cls, ctx, node):
        """Rewrite a NotEqual node to Equal and append a Not on its output.

        Factored out of version_1/7/11, which previously repeated this
        block verbatim; the inserted calls and their order are unchanged.
        """
        node.type = "Equal"
        output_name = node.output[0]
        not_node = ctx.insert_new_node_on_output("Not", output_name, name=utils.make_name(node.name))
        ctx.copy_shape(output_name, not_node.output[0])
        ctx.copy_dtype(output_name, not_node.output[0])

    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        need_not = node.type == "NotEqual"
        common.BroadcastOp.version_1(ctx, node, **kwargs)
        if need_not:
            cls._wrap_with_not(ctx, node)

    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        # T2 output = Equal(T1 x, T1 y), T1 \in {bool, int32, int64}
        need_not = node.type == "NotEqual"
        supported_dtypes = [
            TensorProto.BOOL,
            TensorProto.INT32,
            TensorProto.INT64
        ]
        # FIXME: casting is not the same as equal
        target_dtype = TensorProto.INT32
        _add_cast_to_inputs(ctx, node, supported_dtypes, target_dtype)
        if need_not:
            cls._wrap_with_not(ctx, node)

    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # starting with opset-11, equal supports all types
        # (but both operands must be of the same type)
        need_not = node.type == "NotEqual"
        _add_cast_to_same_type_to_inputs(ctx, node)
        if need_not:
            cls._wrap_with_not(ctx, node)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
@tf_op(["Greater", "Less"])
class GreaterLess:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Greater/Less only need generic broadcast handling before opset 7."""
        common.BroadcastOp.version_1(ctx, node, **kwargs)

    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        """Insert input casts for opset-7 Greater/Less.

        T2 output = Greater(T1 x, T1 y), T2=tensor(bool)
        T2 output = Less(T1 x, T1 y), T2=tensor(bool)
        Greater/Less in opset 7 only support a limited set of types, so
        non-float inputs are cast to float32 first.
        """
        float_dtypes = [
            TensorProto.FLOAT,
            TensorProto.FLOAT16,
            TensorProto.DOUBLE,
        ]
        _add_cast_to_inputs(ctx, node, float_dtypes, TensorProto.FLOAT)
|
| 123 |
+
|
| 124 |
+
@tf_op(["GreaterEqual", "LessEqual"])
class GreaterLessEqual:
    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        # Opset 7 has no GreaterOrEqual/LessOrEqual. Use the identities
        #   GreaterEqual(x, y) == Not(Less(x, y))
        #   LessEqual(x, y)    == Not(Greater(x, y))
        # First apply the same dtype-cast handling as strict Greater/Less.
        GreaterLess.version_7(ctx, node, **kwargs)
        output_name = node.output[0]
        # flip to the strict comparison of the opposite direction ...
        node.op.op_type = "Less" if node.op.op_type == "GreaterEqual" else "Greater"
        # ... and negate its boolean result
        new_node = ctx.insert_new_node_on_output("Not", output_name, name=utils.make_name(node.name))
        ctx.copy_shape(output_name, new_node.output[0])
        ctx.set_dtype(new_node.output[0], ctx.get_dtype(output_name))

    @classmethod
    def version_12(cls, ctx, node, **kwargs):
        # opset 12 provides the fused comparison ops natively
        node.op.op_type = "GreaterOrEqual" if node.op.op_type == "GreaterEqual" else "LessOrEqual"
|
lib/python3.10/site-packages/tf2onnx/onnx_opset/math.py
ADDED
|
@@ -0,0 +1,740 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
math
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
from __future__ import unicode_literals
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
|
| 14 |
+
import numpy as np
|
| 15 |
+
from onnx import onnx_pb
|
| 16 |
+
from tf2onnx import constants, utils
|
| 17 |
+
from tf2onnx.handler import tf_op
|
| 18 |
+
from tf2onnx.onnx_opset import common
|
| 19 |
+
|
| 20 |
+
logger = logging.getLogger(__name__)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
# pylint: disable=unused-argument,missing-docstring
|
| 24 |
+
|
| 25 |
+
@tf_op(["Add", "AddV2", "Div", "Mul", "Sub"])
class BroadcastOp(common.BroadcastOp):
    # elementwise arithmetic ops map directly; common.BroadcastOp supplies
    # the pre-opset-7 broadcast handling
    pass
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
@tf_op(["RealDiv", "TruncateDiv"], onnx_op="Div")
class RealDiv(common.BroadcastOp):
    # both tf division variants become onnx Div (rename via decorator);
    # broadcast handling comes from common.BroadcastOp
    pass
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
@tf_op(["LeakyRelu", "Softplus", "Softsign"])
class DirectOpSinceOpset1:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # 1:1 mapping to the same-named onnx ops since opset 1; no rewrite needed
        pass
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
@tf_op(["Abs", "Ceil", "Elu", "Exp", "Floor", "Log", "Neg", "Relu", "Sigmoid", "Sqrt",
        "Tanh", "Reciprocal"])
class DirectOp:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # all of these map 1:1 to the same-named onnx op
        pass

    @classmethod
    def version_6(cls, ctx, node, **kwargs):
        if node.type == "Log":
            # ORT doesn't implement Log on doubles
            double_to_float = {onnx_pb.TensorProto.DOUBLE: onnx_pb.TensorProto.FLOAT}
            node.maybe_cast_input([[onnx_pb.TensorProto.FLOAT]], double_to_float)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
@tf_op(["Acos", "Asin", "Atan", "Cos", "Sin", "Tan"])
class TrigOpSinceOpset7:
    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        # trig ops exist in onnx since opset 7 with identical names
        pass
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
@tf_op(["Acosh", "Asinh", "Atanh", "Cosh", "Sinh"])
class TrigOpSinceOpset9:
    @classmethod
    def version_9(cls, ctx, node, **kwargs):
        # hyperbolic trig ops exist in onnx since opset 9 with identical names
        pass
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def make_min_or_max_op(ctx, op_type, inputs, outputs,
                       output_shapes=None, output_dtypes=None):
    """Build an onnx Min/Max node for the given inputs/outputs.

    Handles two opset limitations:
    - dtype: non-float inputs are cast to float32 and the result cast back;
    - broadcasting: onnx Min/Max before opset 8 do not broadcast, so
      mismatched-shape inputs are expanded via Add with a zeros tensor.

    Returns the final Identity node that carries the requested outputs.
    """
    # support more dtype
    supported_dtypes = [
        onnx_pb.TensorProto.FLOAT,
        onnx_pb.TensorProto.FLOAT16,
        onnx_pb.TensorProto.DOUBLE
    ]
    target_dtype = onnx_pb.TensorProto.FLOAT
    need_cast = False
    cast_inputs = []
    for inp in inputs:
        dtype = ctx.get_dtype(inp)
        utils.make_sure(dtype is not None, "dtype of {} is None".format(inp))
        if dtype not in supported_dtypes:
            cast_inp = ctx.make_node("Cast", [inp], attr={"to": target_dtype})
            cast_inputs.append(cast_inp.output[0])
            need_cast = True
        else:
            cast_inputs.append(inp)
    node = ctx.make_node(op_type, cast_inputs, shapes=output_shapes)
    actual_outputs = node.output
    if need_cast:
        # cast the Min/Max result back to the original (or requested) dtype
        origin_dtype = ctx.get_dtype(inputs[0])
        if output_dtypes is not None:
            origin_dtype = output_dtypes[0]
        ctx.set_dtype(node.output[0], target_dtype)
        cast_name = utils.make_name(node.name)
        cast_node = ctx.insert_new_node_on_output("Cast", node.output[0], name=cast_name, to=origin_dtype)
        ctx.set_dtype(cast_node.output[0], origin_dtype)
        ctx.copy_shape(node.output[0], cast_node.output[0])
        actual_outputs = cast_node.output
    # Identity node owns the caller-requested output names/shapes/dtypes
    final_node = ctx.make_node("Identity", actual_outputs, outputs=outputs,
                               shapes=output_shapes, dtypes=output_dtypes)

    # tensorflow minimum/maximum does support broadcast, onnx < opset 8 does not.
    # handle this by doing something like:
    # y = min(x1, add(x2, sub(x1, x1))), where x1, x2 are the inputs and x2 is a scalar
    # this will create a tensor of zeros of the shape of x1, adds x2 to it (which broadcasts) and use that for min.
    shapeo = ctx.get_shape(node.output[0])
    needs_broadcast_op = []
    has_correct_shape = []
    if ctx.opset < 8:
        for i, input_name in enumerate(node.input):
            if ctx.get_shape(input_name) != shapeo:
                needs_broadcast_op.append(i)
            else:
                has_correct_shape.append(input_name)
    if needs_broadcast_op:
        has_correct_shape = has_correct_shape[0]
        for i in needs_broadcast_op:
            input_node = node.inputs[i]
            # get a tensor with zeros (since there is no Fill op as of opset8)
            sub_node = ctx.make_node("Sub", [has_correct_shape, has_correct_shape],
                                     op_name_scope=input_node.name)
            # use add as 'broadcast' op
            add_node = ctx.make_node("Add", [input_node.output[0], sub_node.output[0]],
                                     op_name_scope=input_node.name)
            ctx.replace_input(node, node.input[i], add_node.output[0], i)
    return final_node
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
@tf_op("Minimum", onnx_op="Min")
@tf_op("Maximum", onnx_op="Max")
class MinMaxOp:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # rebuild the node via make_min_or_max_op, which adds dtype casts and
        # pre-opset-8 broadcast emulation as needed
        shapes = node.output_shapes
        dtypes = node.output_dtypes
        ctx.remove_node(node.name)
        make_min_or_max_op(ctx, node.type, node.input, node.output, shapes, dtypes)

    @classmethod
    def version_12(cls, ctx, node, **kwargs):
        pass  # support all numeric types and broadcasting
|
| 146 |
+
|
| 147 |
+
@tf_op("ClipByValue")
class ClipByValueOp:
    # in tf-1.8 there was a ClipByValue op which in later versions was replaced by max(min(x, a), b)
    # To support models generated with tf-1.8 rewrite the tf ClipByValue op to max(min(x, a), b)
    @classmethod
    def version_8(cls, ctx, node, **kwargs):
        """Rewrite ClipByValue(x, clip_min, clip_max) as Min(Max(x, clip_min), clip_max),
        casting non-float operands to float32 and the result back if needed."""
        supported = [onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.DOUBLE]
        # fetch those upfront since they are not accessible once we remove 'node'
        shapes = node.output_shapes
        dtypes = node.output_dtypes
        input_dtype = ctx.get_dtype(node.input[0])
        name = node.name
        min_node = node.input[1]
        if ctx.get_dtype(min_node) not in supported:
            # cast min if needed
            min_node = ctx.insert_new_node_on_input(node, "Cast", min_node, to=onnx_pb.TensorProto.FLOAT).output[0]
        max_node = node.input[2]
        if ctx.get_dtype(max_node) not in supported:
            # cast max if needed
            max_node = ctx.insert_new_node_on_input(node, "Cast", max_node, to=onnx_pb.TensorProto.FLOAT).output[0]
        ctx.remove_node(name)
        # Max(x, clip_min) takes over the original node's output name
        new_node = ctx.make_node("Max", [node.input[0], min_node], outputs=[node.output[0]],
                                 shapes=shapes, dtypes=dtypes)
        if input_dtype not in supported:
            # cast the data tensor if needed
            ctx.insert_new_node_on_input(new_node, "Cast", new_node.input[0], to=onnx_pb.TensorProto.FLOAT)

        # wrap with Min(..., clip_max)
        new_node = ctx.insert_new_node_on_output("Min", new_node.output[0], name=utils.make_name(name))
        new_node.input.append(max_node)
        # copy shape and type
        ctx.set_dtype(new_node.output[0], dtypes[0])
        ctx.set_shape(new_node.output[0], shapes[0])
        if dtypes[0] not in supported:
            # cast output if needed
            new_node = ctx.insert_new_node_on_output("Cast", new_node.output[0],
                                                     name=utils.make_name(name), to=dtypes[0])
            # copy shape and type
            ctx.set_dtype(new_node.output[0], dtypes[0])
            ctx.set_shape(new_node.output[0], shapes[0])

    @classmethod
    def version_12(cls, ctx, node, **kwargs):
        node.type = 'Clip'  # clip supports all types now
|
| 190 |
+
|
| 191 |
+
@tf_op(["LogSoftmax", "Softmax"])
class Softmax:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Fix the softmax axis attribute when mapping tf to onnx.

        T output = Softmax(T logits). The axis softmax would be performed
        on is always -1 in tensorflow, while onnx Softmax defaults to
        axis 1, so set it explicitly to rank-1.
        """
        logits_shape = ctx.get_shape(node.input[0])
        # previously an unknown shape (None) raised an opaque TypeError from
        # len(); fail with an explicit message instead
        utils.make_sure(logits_shape is not None,
                        "%s requires known rank for input %s", node.type, node.input[0])
        node.set_attr("axis", len(logits_shape) - 1)

    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # same axis handling as opset 1
        cls.version_1(ctx, node, **kwargs)

    @classmethod
    def version_13(cls, ctx, node, **kwargs):
        # onnx Softmax default axis is -1 from opset 13, matching tensorflow
        pass
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
@tf_op("Square")
class Square:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # rewrite Square(x) as Mul(x, x): duplicate the single input
        node.type = "Mul"
        node.input.append(node.input[0])
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
@tf_op("Relu6")
class Relu6:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # relu6 = min(max(features, 0), 6), i.e. Clip to [0, 6]
        node.type = "Clip"
        node.set_attr("min", 0.0)
        node.set_attr("max", 6.0)

    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # since opset 11 Clip takes min and max as inputs (typed like the
        # data) instead of float attributes
        node.type = "Clip"
        onnx_dtype = ctx.get_dtype(node.input[0])
        np_dtype = utils.ONNX_TO_NUMPY_DTYPE[onnx_dtype]
        clip_min = ctx.make_const(utils.make_name("{}_min".format(node.name)), np.array(0.0, dtype=np_dtype))
        clip_max = ctx.make_const(utils.make_name("{}_max".format(node.name)), np.array(6.0, dtype=np_dtype))
        node.input.append(clip_min.output[0])
        node.input.append(clip_max.output[0])
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
@tf_op("Rsqrt")
class Rsqrt:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Rewrite tf Rsqrt(x) as Reciprocal(Sqrt(x))."""
        node.type = "Sqrt"
        recip_name = utils.make_name(node.name)
        recip = ctx.insert_new_node_on_output("Reciprocal", node.output[0], name=recip_name)
        ctx.copy_shape(node.output[0], recip.output[0])
|
| 248 |
+
|
| 249 |
+
|
| 250 |
+
@tf_op("SquaredDifference")
class SquaredDifference:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Rewrite tf SquaredDifference(x, y) as Mul(Sub(x, y), Sub(x, y))."""
        node.type = "Sub"
        square = ctx.insert_new_node_on_output("Mul", node.output[0], name=utils.make_name(node.name))
        # feed the Sub result into both Mul operands to square it
        square.input.append(node.output[0])
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
@tf_op("Sign")
class Sign:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Sign op.

        Pre-opset-9 there is no onnx Sign; emulate it as
        Cast(x > 0, T) - Cast(x < 0, T), which yields -1/0/+1.
        """
        # T sign = Sign(T Input)
        node_dtype = ctx.get_dtype(node.output[0])
        utils.make_sure(node_dtype, "Dtype of {} is None".format(node.name))
        if node_dtype in [onnx_pb.TensorProto.COMPLEX64, onnx_pb.TensorProto.COMPLEX128]:
            raise ValueError("dtype " + str(node_dtype) + " is not supported in onnx for now")
        zero_name = utils.make_name("{}_zero".format(node.name))
        ctx.make_const(zero_name, np.array(0, dtype=np.float32))
        if node_dtype not in [onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.DOUBLE]:
            # non-float input: compare in float32
            cast_node_0 = ctx.make_node("Cast", [node.input[0]], {"to": onnx_pb.TensorProto.FLOAT})
            greater_node = ctx.make_node("Greater", [cast_node_0.output[0], zero_name])
            less_node = ctx.make_node("Less", [cast_node_0.output[0], zero_name])
        else:
            # NOTE(review): float16/double inputs are compared against the
            # float32 zero constant directly — confirm the runtime tolerates
            # the operand dtype mismatch here
            greater_node = ctx.make_node("Greater", [node.input[0], zero_name])
            less_node = ctx.make_node("Less", [node.input[0], zero_name])
        # bool comparison results -> output dtype; (x>0) - (x<0) is the sign
        cast_node_1 = ctx.make_node("Cast", [greater_node.output[0]], {"to": node_dtype})
        cast_node_2 = ctx.make_node("Cast", [less_node.output[0]], {"to": node_dtype})

        shapes = node.output_shapes
        dtypes = node.output_dtypes
        ctx.remove_node(node.name)
        ctx.make_node("Sub", [cast_node_1.output[0], cast_node_2.output[0]], outputs=[node.output[0]],
                      shapes=shapes, dtypes=dtypes)

    @classmethod
    def version_9(cls, ctx, node, **kwargs):
        # onnx has a native Sign since opset 9; only validate the dtype
        node_dtype = ctx.get_dtype(node.output[0])
        utils.make_sure(node_dtype, "dtype of {} is None".format(node.name))
        if node_dtype in [onnx_pb.TensorProto.BOOL, onnx_pb.TensorProto.COMPLEX64, onnx_pb.TensorProto.COMPLEX128]:
            raise ValueError("dtype " + str(node_dtype) + " is not supported in onnx for now")
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
@tf_op("Pow")
class Pow:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        if ctx.is_target(constants.TARGET_CAFFE2):
            # workaround a bug in caffe2 pre Feb2018, pow(a, b) becomes np.exp(np.log(a) * b)
            node.type = "Log"
            b = node.input[1]
            ctx.remove_input(node, node.input[1], 1)
            op_name = utils.make_name(node.name)
            mul_op = ctx.insert_new_node_on_output("Mul", node.output[0], name=op_name)
            mul_op.input.append(b)
            op_name = utils.make_name(node.name)
            exp_op = ctx.insert_new_node_on_output("Exp", mul_op.output[0], name=op_name)
            ctx.copy_shape(node.output[0], exp_op.output[0])
            # the inserted Mul may still need pre-opset-7 broadcast handling
            BroadcastOp.version_1(ctx, mul_op, **kwargs)

    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        # onnx Pow maps directly from opset 7 on; nothing to rewrite
        pass
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
@tf_op("LRN")
class LRN:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # ONNX: Each input value is divided by (bias+(alpha/size)*sum(xi^2 for every xi in the local region))^beta
        # TF: sqr_sum[a, b, c, d] = sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
        # output = input / (bias + alpha * sqr_sum) ** beta
        # => onnx size = 2*depth_radius + 1 and onnx alpha = tf alpha * size

        # by default, depth_radius is 5 in tensorflow
        size = node.get_attr_value("depth_radius", 5) * 2 + 1

        node.set_attr("size", size)
        node.set_attr("alpha", size * node.get_attr("alpha").f)

        shapes = node.output_shapes[0]
        dtypes = node.output_dtypes[0]

        # onnx LRN operates on NCHW; wrap the node with transposes
        # (assumes the tf input here is NHWC — see the perm constants)
        ctx.insert_new_node_on_input(node, "Transpose", node.input[0], perm=constants.NHWC_TO_NCHW)
        ctx.update_node_shape_dtype(node, override=True)
        op_name = utils.make_name(node.name)
        ctx.insert_new_node_on_output("Transpose", node.output[0], perm=constants.NCHW_TO_NHWC,
                                      name=op_name, shapes=shapes, dtypes=dtypes)
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
@tf_op(["MatMul", "BatchMatMul", "BatchMatMulV2"])
class MatMul:
    """Maps tf matmul variants onto ONNX MatMul, materializing any
    transpose/adjoint flags as explicit Transpose nodes."""

    @classmethod
    def _transpose_last_two(cls, ctx, node, input_index):
        # Insert a Transpose swapping the two innermost axes of the given
        # input; skipped when the rank is unknown (shape is None).
        shape = ctx.get_shape(node.input[input_index])
        if shape:
            perm = list(range(len(shape)))
            perm[-1], perm[-2] = perm[-2], perm[-1]
            ctx.insert_new_node_on_input(node, "Transpose", node.input[input_index], perm=perm)

    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # tensorflow allows transpose and conjugated. If found, insert the required transpose.
        # We could use Gemm as well but tensorflow does not pass bias in matmul.
        node.type = "MatMul"

        attrs = ["transpose_a", "transpose_b", "adjoint_a", "adjoint_b", "adj_x", "adj_y"]
        attrs_val = [node.get_attr(attr) for attr in attrs]
        attrs_val = [0 if val is None else val.i for val in attrs_val]

        dtype = ctx.get_dtype(node.output[0])
        if any(attrs_val[2:]):
            # adjoint implies conjugation, which is a no-op only for real
            # types; complex is not supported in onnx matmul for now.
            # BUG FIX: dtype is an int enum value -- concatenating it to a
            # str raised TypeError instead of the intended ValueError.
            if dtype not in [onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.DOUBLE]:
                raise ValueError("dtype " + str(dtype) + " is not supported in onnx matmul for now")

        # An input is transposed iff an odd number of its flags is set
        # (transpose_x, adjoint_x and adj_x all request the same swap).
        transpose_a = (attrs_val[0] + attrs_val[2] + attrs_val[4]) % 2
        transpose_b = (attrs_val[1] + attrs_val[3] + attrs_val[5]) % 2

        if transpose_a != 0:
            cls._transpose_last_two(ctx, node, 0)

        if transpose_b != 0:
            cls._transpose_last_two(ctx, node, 1)

        unsupported = ["a_is_sparse", "b_is_sparse"]
        for i in unsupported:
            val = node.get_attr(i)
            if val is not None and val.i != 0:
                raise ValueError(node.type + " attribute " + i + " is not supported")
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
@tf_op("Erf")
class Erf:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Error function.

        Opset < 9 has no native Erf, so erf(x) is approximated with the
        Abramowitz & Stegun polynomial formula 7.1.26 built from
        elementary ONNX ops.
        """
        # constant names
        a1 = "erf_a1"
        a2 = "erf_a2"
        a3 = "erf_a3"
        a4 = "erf_a4"
        a5 = "erf_a5"
        p = "erf_p"
        one = "erf_one"
        null = "erf_null"

        n = node.name
        output_name = node.output[0]
        # The constants use fixed graph-wide names, so they are created
        # only when the first Erf node is converted and shared afterwards.
        erf_a1_node = ctx.get_node_by_output("erf_a1")
        if erf_a1_node is None:
            # insert the constants for erf once
            ctx.make_const(a1, np.array(0.254829592, dtype=np.float32))
            ctx.make_const(a2, np.array(-0.284496736, dtype=np.float32))
            ctx.make_const(a3, np.array(1.421413741, dtype=np.float32))
            ctx.make_const(a4, np.array(-1.453152027, dtype=np.float32))
            ctx.make_const(a5, np.array(1.061405429, dtype=np.float32))
            ctx.make_const(p, np.array(0.3275911, dtype=np.float32))
            ctx.make_const(one, np.array(1., dtype=np.float32))
            ctx.make_const(null, np.array(0., dtype=np.float32))

        x = node.input[0]

        # erf(x):
        #  sign = 1 if x >= 0 else -1
        #  x = abs(x)
        #  # A&S formula 7.1.26
        #  t = 1.0 / (1.0 + p * x)
        #  y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * math.exp(-x * x)
        #  return sign * y # erf(-x) = -erf(x)

        x_node = ctx.make_node("Abs", [x], op_name_scope=node.name, name="x")
        negx_node = ctx.make_node("Sub", [null, x], op_name_scope=node.name, name="negx")
        # sign(x) without a Sign op: cast(x > 0) - cast(x < 0) gives
        # {-1, 0, 1}.
        is_positive_node = ctx.make_node("Greater", [x, null], op_name_scope=node.name, name="isPositive")
        is_positive_value_node = ctx.make_node("Cast", is_positive_node.output, op_name_scope=node.name,
                                               name="isPositiveValue", attr={"to": onnx_pb.TensorProto.FLOAT})
        is_neg_node = ctx.make_node("Less", [x, null], op_name_scope=node.name, name="isNeg")
        ig_neg_value_node = ctx.make_node("Cast", is_neg_node.output, op_name_scope=node.name, name="isNegValue",
                                          attr={"to": onnx_pb.TensorProto.FLOAT})
        sign0_node = ctx.make_node("Sub", [is_positive_value_node.output[0], ig_neg_value_node.output[0]],
                                   op_name_scope=node.name, name="sign0")
        # (sign0 + 1) - |sign0| maps {-1, 0, 1} to {-1, 1, 1}: x == 0 gets
        # sign +1, harmless because the polynomial evaluates to ~0 there.
        sign_add_one_node = ctx.make_node("Add", [sign0_node.output[0], one], op_name_scope=node.name,
                                          name="signAddOne")
        non_zero_node = ctx.make_node("Abs", sign0_node.output, op_name_scope=node.name, name="nonZero")
        sign_node = ctx.make_node("Sub", [sign_add_one_node.output[0], non_zero_node.output[0]],
                                  op_name_scope=node.name, name="sign")
        # t = 1 / (1 + p * |x|)
        num_4_node = ctx.make_node("Mul", [x_node.output[0], p], op_name_scope=node.name, name="4")
        num_5_node = ctx.make_node("Add", [num_4_node.output[0], one], op_name_scope=node.name, name="5")
        t_node = ctx.make_node("Div", [one, num_5_node.output[0]], op_name_scope=node.name, name="t")
        # exp(-x * x) * t
        xsq_node = ctx.make_node("Mul", [x, negx_node.output[0]], op_name_scope=node.name, name="xsq")
        num_6_node = ctx.make_node("Exp", xsq_node.output, op_name_scope=node.name, name="6")
        num_7_node = ctx.make_node("Mul", [num_6_node.output[0], t_node.output[0]], op_name_scope=node.name, name="7")
        # Horner evaluation of the degree-5 polynomial in t.
        num_8_node = ctx.make_node("Mul", [t_node.output[0], a5], op_name_scope=node.name, name="8")
        num_9_node = ctx.make_node("Add", [num_8_node.output[0], a4], op_name_scope=node.name, name="9")
        num_10_node = ctx.make_node("Mul", [num_9_node.output[0], t_node.output[0]], op_name_scope=node.name, name="10")
        num_11_node = ctx.make_node("Add", [num_10_node.output[0], a3], op_name_scope=node.name, name="11")
        num_12_node = ctx.make_node("Mul", [num_11_node.output[0], t_node.output[0]], op_name_scope=node.name,
                                    name="12")
        num_13_node = ctx.make_node("Add", [num_12_node.output[0], a2], op_name_scope=node.name, name="13")
        num_14_node = ctx.make_node("Mul", [num_13_node.output[0], t_node.output[0]], op_name_scope=node.name,
                                    name="14")
        num_15_node = ctx.make_node("Add", [num_14_node.output[0], a1], op_name_scope=node.name, name="15")
        num_16_node = ctx.make_node("Mul", [num_15_node.output[0], num_7_node.output[0]], op_name_scope=node.name,
                                    name="16")
        num_17_node = ctx.make_node("Sub", [one, num_16_node.output[0]], op_name_scope=node.name, name="17")

        # The final Mul (sign * y) takes over the original node's name,
        # output and shape/dtype bookkeeping.
        shapes = node.output_shapes
        dtypes = node.output_dtypes
        ctx.remove_node(node.name)
        ctx.make_node("Mul", [num_17_node.output[0], sign_node.output[0]], outputs=[output_name], name=n,
                      shapes=shapes, dtypes=dtypes)

    @classmethod
    def version_9(cls, ctx, node, **kwargs):
        # No rewrite needed from opset 9 on: ONNX has a native Erf op.
        pass
|
| 472 |
+
|
| 473 |
+
|
| 474 |
+
@tf_op("FloorDiv")
class FloorDiv:
    @classmethod
    def version_6(cls, ctx, node, **kwargs):
        """tf FloorDiv(x, y) -> ONNX Div, with a Floor appended for
        floating-point inputs; for other dtypes only the Div is emitted."""
        node.type = "Div"
        float_types = [onnx_pb.TensorProto.FLOAT,
                       onnx_pb.TensorProto.FLOAT16,
                       onnx_pb.TensorProto.DOUBLE]
        if ctx.get_dtype(node.input[0]) in float_types:
            floor_node = ctx.insert_new_node_on_output(
                op_type="Floor", output_name=node.output[0],
                name=utils.make_name("floor_div_res"))
            # The Floor inherits the Div result's dtype and shape.
            ctx.copy_dtype(node.output[0], floor_node.output[0])
            ctx.copy_shape(node.output[0], floor_node.output[0])
|
| 487 |
+
|
| 488 |
+
|
| 489 |
+
@tf_op("FloorMod")
class FloorMod:
    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        """tf FloorMod(x, y) rewritten as x - floor(x / y) * y."""
        quotient = ctx.make_node(op_type="Div", inputs=node.input)
        float_types = [onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.FLOAT16,
                       onnx_pb.TensorProto.DOUBLE]
        if ctx.get_dtype(node.input[0]) in float_types:
            # Integer Div needs no explicit Floor; floats do.
            quotient = ctx.make_node(op_type="Floor", inputs=quotient.output)

        product = ctx.make_node(op_type="Mul", inputs=[quotient.output[0], node.input[1]])
        # The final Sub takes over the original node's name, outputs and
        # shape/dtype bookkeeping.
        saved_shapes = node.output_shapes
        saved_dtypes = node.output_dtypes
        ctx.remove_node(node.name)
        ctx.make_node(op_type="Sub", inputs=[node.input[0], product.output[0]],
                      name=node.name, outputs=node.output,
                      shapes=saved_shapes, dtypes=saved_dtypes)
|
| 506 |
+
|
| 507 |
+
|
| 508 |
+
@tf_op("Selu")
class Selu:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # No rewrite needed: the op name and inputs pass through to ONNX
        # Selu as-is.  NOTE(review): this relies on TF's fixed selu
        # constants matching ONNX Selu's default alpha/gamma -- confirm.
        pass
|
| 513 |
+
|
| 514 |
+
|
| 515 |
+
@tf_op("Cumsum", onnx_op="CumSum")
class CumSum:
    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # Only the op name differs (Cumsum -> CumSum); the rename is done
        # by the onnx_op mapping in the decorator, so nothing to rewrite.
        pass
|
| 520 |
+
|
| 521 |
+
|
| 522 |
+
@tf_op("Round")
class Round:
    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # Opset 11 has a native Round with the same name; no rewrite
        # needed.
        pass
|
| 527 |
+
|
| 528 |
+
|
| 529 |
+
@tf_op("MatrixDeterminant", onnx_op="Det")
class Det:
    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # Direct rename MatrixDeterminant -> Det via the onnx_op mapping;
        # no further rewrite needed.
        pass
|
| 534 |
+
|
| 535 |
+
|
| 536 |
+
@tf_op(["LeftShift", "RightShift"])
class BitShift:

    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # ONNX BitShift only accepts unsigned integer types; signed inputs
        # are cast to an unsigned type, shifted, then cast back.
        dir_map = {"LeftShift": "LEFT", "RightShift": "RIGHT"}
        direction = dir_map[node.type]
        supported = [onnx_pb.TensorProto.UINT8, onnx_pb.TensorProto.UINT16,
                     onnx_pb.TensorProto.UINT32, onnx_pb.TensorProto.UINT64]
        # NOTE(review): INT16 -> UINT32 and INT32 -> UINT64 widen past the
        # matching width (UINT16/UINT32 would be the natural picks);
        # confirm the widening is intentional for negative inputs.
        type_map = {onnx_pb.TensorProto.INT8: onnx_pb.TensorProto.UINT8,
                    onnx_pb.TensorProto.INT16: onnx_pb.TensorProto.UINT32,
                    onnx_pb.TensorProto.INT32: onnx_pb.TensorProto.UINT64}
        shapes = node.output_shapes
        dtypes = node.output_dtypes
        ctx.remove_node(node.name)

        # Re-create the node as a plain ONNX BitShift carrying the
        # direction attribute, reusing the original name and outputs.
        node = ctx.make_node("BitShift", inputs=node.input, outputs=node.output, name=node.name,
                             shapes=shapes, dtypes=dtypes, domain=constants.ONNX_DOMAIN, attr={'direction': direction})

        # If inputs had to be cast to unsigned, cast the result back to
        # the original output dtype.
        if node.maybe_cast_input([supported, supported], type_map):
            cast_back_node = ctx.insert_new_node_on_output(
                "Cast", node.output[0], name=utils.make_name(node.name) + "_castback",
                to=dtypes[0])
            ctx.set_dtype(cast_back_node.output[0], dtypes[0])
            ctx.copy_shape(node.name, cast_back_node.output[0])
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
@tf_op("SquaredDistance", onnx_op="MeanSquaredDistance")
class SquaredDistance:
    @classmethod
    def version_12(cls, ctx, node, **kwargs):
        # Map onto MeanSquaredDistance with reduction disabled so the
        # per-element distances are returned unreduced.
        node.attr["reduction"] = "none"
|
| 568 |
+
|
| 569 |
+
|
| 570 |
+
@tf_op("Einsum")
class Einsum:
    @classmethod
    def version_12(cls, ctx, node, **kwargs):
        # Drop tf's input-count attribute ("N"), which ONNX Einsum does
        # not have, and lower-case the equation string.
        # NOTE(review): down-casing would merge labels if an equation
        # mixed cases (e.g. 'i' and 'I') -- confirm tf never emits that.
        del node.attr["N"]
        node.attr["equation"].s = node.attr["equation"].s.lower()
|
| 576 |
+
|
| 577 |
+
|
| 578 |
+
@tf_op("IsFinite")
class IsFinite:
    @classmethod
    def version_10(cls, ctx, node, **kwargs):
        """Rewrite tf IsFinite as Not(Or(IsInf(x), IsNaN(x)))."""
        out_shapes = node.output_shapes
        # All intermediate and final results are boolean.
        out_dtypes = [onnx_pb.TensorProto.BOOL] * len(node.output_dtypes)
        out_names = node.output

        ctx.remove_node(node.name)

        is_inf = ctx.make_node("IsInf", inputs=node.input, name=utils.make_name(node.name),
                               shapes=out_shapes, dtypes=out_dtypes)
        is_nan = ctx.make_node("IsNaN", inputs=node.input, name=utils.make_name(node.name),
                               shapes=out_shapes, dtypes=out_dtypes)
        not_finite = ctx.make_node("Or", inputs=[is_inf.output[0], is_nan.output[0]],
                                   name=utils.make_name(node.name),
                                   shapes=out_shapes, dtypes=out_dtypes)
        # The final Not reuses the original node's name and outputs so
        # downstream consumers stay wired up.
        ctx.make_node("Not", inputs=not_finite.output, name=node.name, outputs=out_names,
                      shapes=out_shapes, dtypes=out_dtypes)
|
| 599 |
+
|
| 600 |
+
|
| 601 |
+
@tf_op("Atan2")
class Atan2Op:
    # support more dtype

    @classmethod
    def version_9(cls, ctx, node, **kwargs):
        """Decompose atan2(y, x) into primitive ONNX ops.

        ::

            def atan2(y, x):
                sx = numpy.sign(x)
                sy = numpy.sign(y)
                pi_part = (sy + sx * (sy ** 2 - 1)) * (sx - 1) * (-numpy.pi/2)
                atan_part = numpy.arctan(y / (x + (1 - sx ** 2))) * sx ** 2
                return atan_part + pi_part
        """
        supported_dtypes = [
            onnx_pb.TensorProto.FLOAT,
            onnx_pb.TensorProto.FLOAT16,
            onnx_pb.TensorProto.DOUBLE
        ]

        onnx_dtype = ctx.get_dtype(node.input[0])
        utils.make_sure(onnx_dtype in supported_dtypes, "Unsupported input type.")
        shape = ctx.get_shape(node.input[0])
        np_dtype = utils.map_onnx_to_numpy_type(onnx_dtype)

        # sign part
        # node.input[0] is y, node.input[1] is x (tf.atan2(y, x) order).

        sign_x_node = ctx.make_node(
            "Sign", inputs=node.input[1:],
            name=utils.make_name(node.name + 'signx'))
        sign_y_node = ctx.make_node(
            "Sign", inputs=node.input[:1],
            name=utils.make_name(node.name + 'signy'))

        # Cast the (possibly integer-typed) signs to the working dtype.
        sx_node = ctx.make_node(
            "Cast", sign_x_node.output[:1], attr={"to": onnx_dtype},
            name=utils.make_name(node.name + 'csignx'))
        sy_node = ctx.make_node(
            "Cast", sign_y_node.output[:1], attr={"to": onnx_dtype},
            name=utils.make_name(node.name + 'csigny'))

        # cst

        one_node = ctx.make_const(
            utils.make_name("{}_one".format(node.name)),
            np.array([1], dtype=np_dtype))

        pib2_node = ctx.make_const(
            utils.make_name("{}_pi".format(node.name)),
            np.array(- np.pi / 2, dtype=np_dtype))

        # pi_part = (sy + sx * (sy ** 2 - 1)) * (sx - 1) * (-numpy.pi/2)

        sxm1_node = ctx.make_node(
            "Sub", [sx_node.output[0], one_node.output[0]],
            name=utils.make_name(node.name + 'sxm1'))
        sy2_node = ctx.make_node(
            "Mul", [sy_node.output[0], sy_node.output[0]],
            name=utils.make_name(node.name + 'sy2'))
        sy2m1_node = ctx.make_node(
            "Sub", [sy2_node.output[0], one_node.output[0]],
            name=utils.make_name(node.name + 'sy2m1'))
        sxsy2m1_node = ctx.make_node(
            "Mul", [sx_node.output[0], sy2m1_node.output[0]],
            name=utils.make_name(node.name + 'sxsy2m1'))
        sysxsy2m1_node = ctx.make_node(
            "Add", [sy_node.output[0], sxsy2m1_node.output[0]],
            name=utils.make_name(node.name + 'sysxsy2m1'))
        m1_node = ctx.make_node(
            "Mul", [sysxsy2m1_node.output[0], sxm1_node.output[0]],
            name=utils.make_name(node.name + 'm1'))
        pi_part = ctx.make_node(
            "Mul", [m1_node.output[0], pib2_node.output[0]],
            name=utils.make_name(node.name + 'pip'))

        # atan
        # NOTE(review): the code adds (sx**2 - 1) to x, while the
        # docstring adds (1 - sx**2).  Both only differ when x == 0,
        # where the atan term is multiplied by sx**2 == 0 anyway, so the
        # perturbation merely avoids a 0/0 division.

        sx2_node = ctx.make_node(
            "Mul", [sx_node.output[0], sx_node.output[0]],
            name=utils.make_name(node.name + 'sx2'))
        sx2m1_node = ctx.make_node(
            "Sub", [sx2_node.output[0], one_node.output[0]],
            name=utils.make_name(node.name + 'sx2m1'))
        xsx2m1_node = ctx.make_node(
            "Add", [node.input[1], sx2m1_node.output[0]],
            name=utils.make_name(node.name + 'xsx2m1'))
        div_node = ctx.make_node(
            "Div", inputs=[node.input[0], xsx2m1_node.output[0]],
            name=utils.make_name(node.name + 'div'))
        atan0_node = ctx.make_node(
            "Atan", inputs=[div_node.output[0]],
            name=utils.make_name(node.name + 'atan0'))
        atan_node = ctx.make_node(
            "Mul", inputs=[sx2_node.output[0], atan0_node.output[0]],
            name=utils.make_name(node.name + 'atan'))

        # final

        ctx.remove_node(node.name)

        last_node = ctx.make_node(
            "Add", inputs=[atan_node.output[0], pi_part.output[0]],
            op_name_scope=node.name + 'all',
            shapes=[shape], dtypes=[onnx_dtype])
        ctx.replace_all_inputs(node.output[0], last_node.output[0])  # ops=ctx.get_nodes()
|
| 710 |
+
|
| 711 |
+
|
| 712 |
+
@tf_op("InvertPermutation")
class InvertPermutationOp:

    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # Invert a permutation vector without a dedicated ONNX op:
        # TopK over the negated input returns the values in ascending
        # order of the original input, so its indices output is the
        # inverse permutation.

        supported_dtypes = [onnx_pb.TensorProto.INT32, onnx_pb.TensorProto.INT64]
        onnx_dtype = ctx.get_dtype(node.input[0])
        utils.make_sure(onnx_dtype in supported_dtypes, "InvertPermutation only applies on INT32, INT64.")

        shape = ctx.get_shape(node.input[0])

        # k for TopK = input length, taken dynamically from its shape.
        shape_node = ctx.make_node(
            "Shape", inputs=node.input, name=utils.make_name(node.name + '_shape'))

        neg_node = ctx.make_node(
            "Neg", inputs=node.input, name=utils.make_name(node.name + '_neg'))

        topk_node = ctx.make_node(
            "TopK", inputs=[neg_node.output[0], shape_node.output[0]],
            name=utils.make_name(node.name + '_topk'), output_count=2)

        ctx.remove_node(node.name)

        # Only the indices output (output 1) of TopK is the result.
        last_node = ctx.make_node(
            "Identity", inputs=topk_node.output[1:], name=utils.make_name(node.name + '_indices'),
            shapes=[shape], dtypes=[onnx_dtype])

        ctx.replace_all_inputs(node.output[0], last_node.output[0])  # ops=ctx.get_nodes()
|
lib/python3.10/site-packages/tf2onnx/onnx_opset/misc.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
misc
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
from __future__ import unicode_literals
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
|
| 14 |
+
from tf2onnx.handler import tf_op
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
# pylint: disable=unused-argument,missing-docstring
|
| 20 |
+
|
| 21 |
+
@tf_op(["CheckNumerics", "StopGradient"])
class MoveToIdent:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        """Turn debug/passthrough ops into Identity, removing the node
        entirely when it merely forwards a constant."""
        node.type = "Identity"
        if not node.inputs[0].is_const():
            return
        # Keep the identity when it is a graph output -- removing it
        # would drop the output name.
        if node.output[0] in ctx.outputs:
            return
        # Identity of a constant: rewire consumers to the constant and
        # drop the node.
        ctx.replace_all_inputs(node.output[0], node.input[0])  # ops=ctx.get_nodes()
        ctx.remove_node(node.name)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@tf_op(["Placeholder", "PlaceholderV2", "PlaceholderWithDefault"])
class DirectOp:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # Intentionally a no-op: placeholders need no graph rewrite here.
        pass
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
@tf_op("NoOp")
class NukeNode:
    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # NoOp carries no data; drop it from the graph entirely.
        ctx.remove_node(node.name)
|
lib/python3.10/site-packages/tf2onnx/onnx_opset/nn.py
ADDED
|
@@ -0,0 +1,1534 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
nn
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
from __future__ import unicode_literals
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
|
| 14 |
+
import numpy as np
|
| 15 |
+
from onnx import onnx_pb, helper
|
| 16 |
+
from onnx.onnx_pb import TensorProto
|
| 17 |
+
from tf2onnx import constants, utils
|
| 18 |
+
from tf2onnx.graph_builder import GraphBuilder
|
| 19 |
+
from tf2onnx.handler import tf_op
|
| 20 |
+
from tf2onnx.onnx_opset import common, controlflow, tensor
|
| 21 |
+
|
| 22 |
+
logger = logging.getLogger(__name__)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
# pylint: disable=unused-argument,missing-docstring,unused-variable
|
| 26 |
+
|
| 27 |
+
def spatial_map(shape, perm):
    """Return a copy of *shape* with the axes listed in *perm* remapped.

    For each index i appearing in perm, the output at position i takes
    the value shape[perm[i]]; positions not mentioned in perm keep
    their original value.
    """
    remapped = shape[:]
    for axis in perm:
        remapped[axis] = shape[perm[axis]]
    return remapped
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def is_channels_last(node):
    """Return True when *node*'s data_format is channels last, i.e. (N, ..., C)."""
    data_format = node.data_format
    return not data_format.startswith("NC")
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def make_shape_channels_first(shape):
    """Reorder a channels-last shape (N, ..., C) into channels-first (N, C, ...)."""
    batch = shape[:1]
    channels = shape[-1:]
    spatial = shape[1:-1]
    return batch + channels + spatial
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def make_shape_channels_last(shape):
    """Reorder a channels-first shape (N, C, ...) into channels-last (N, ..., C).

    BUG FIX: the previous body returned
    ``shape[:1] + shape[1:-1] + shape[1:2]``, which for (N, C, *spatial)
    kept C in place and dropped the last spatial dim
    (e.g. [N, C, H, W] -> [N, C, H, C]), contradicting the documented
    contract.  The spatial dims of a channels-first shape are
    ``shape[2:]``, making this the exact inverse of
    make_shape_channels_first.
    """
    return shape[:1] + shape[2:] + shape[1:2]
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def get_channels_first_permutation(spatial):
    """Permutation taking a (N, ..., C) array to (N, C, ...) with *spatial* dims."""
    spatial_axes = list(range(1, spatial + 1))
    return [0, spatial + 1] + spatial_axes
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def get_channels_last_permutation(spatial):
    """Permutation taking a (N, C, ...) array to (N, ..., C) with *spatial* dims."""
    spatial_axes = list(range(2, spatial + 2))
    return [0] + spatial_axes + [1]
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def conv_convert_inputs(ctx, node, with_kernel=False, new_kernel_shape=None,
                        input_indices=None, output_indices=None, spatial=2):
    """Convert input and kernel from tensorflow to onnx layout.

    This may require inserting transpose ops for input, kernel and output
    unless they are constants, in which case the constant is transposed in
    place. Inputs are transposed if they are channels-last (NHWC-like). The
    kernel is always transposed from (..., C_in, C_out) to (C_out, C_in, ...).
    Outputs are transposed back if the format is channels-last.
    Some convolutions like depthwise_conv2d require a reshape of the kernel.

    Args:
        ctx: The parent graph.
        node: Node of the convolution op.
        with_kernel: Transpose (and optionally reshape) the kernel at input[1].
        new_kernel_shape: If set, reshape the kernel to this shape first.
        input_indices: Indices that define the inputs (defaults to [0]).
        output_indices: Indices that define the outputs (defaults to [0]).
        spatial: Number of spatial dimensions.
    """

    if input_indices is None:
        input_indices = [0]
    if output_indices is None:
        output_indices = [0]

    # Transpose inputs if needed.
    if is_channels_last(node):
        # Get channels first permutation, e.g. [0, 3, 1, 2] for spatial=2.
        permutation = get_channels_first_permutation(spatial)

        # Transpose input if needed, no need to record shapes on input.
        for idx in input_indices:
            # If input is a constant, transpose that one if we are the only consumer.
            input_node = node.inputs[idx]
            input_name = node.input[idx]

            if input_node.is_const() and len(ctx.find_output_consumers(input_name)) == 1:
                # Transpose constant to make it channels first.
                val = input_node.get_tensor_value(as_list=False)
                val = np.transpose(val, permutation)

                input_node.set_tensor_value(val)
            else:
                # Insert transpose op.
                transpose = ctx.insert_new_node_on_input(node, "Transpose", input_name)
                transpose.set_attr("perm", permutation)
                transpose.skip_conversion = True

                # Record the channels-first shape on the transpose output when known.
                shape = ctx.get_shape(input_name)
                if shape is not None:
                    new_shape = make_shape_channels_first(shape)

                    ctx.set_shape(transpose.output[0], new_shape)

    # Transpose kernel if needed.
    if with_kernel:
        # Some ONNX convolution ops require to reshape the kernel (ie. depthwise_conv2d).
        if new_kernel_shape:
            kernel_name = node.input[1]
            if ctx.opset < 5:
                # Old reshape takes new shape as attribute.
                reshape = ctx.insert_new_node_on_input(node, "Reshape", kernel_name)
                reshape.set_attr("shape", new_kernel_shape)
                reshape.skip_conversion = True
            else:
                # New reshape takes new shape as input[1].
                shape_name = utils.make_name(node.name)
                ctx.make_const(shape_name, np.array(new_kernel_shape, dtype=np.int64))

                reshape = ctx.make_node("Reshape", [kernel_name, shape_name])
                ctx.replace_input(node, kernel_name, reshape.output[0], 1)

                reshape.skip_conversion = True
            ctx.set_shape(reshape.output[0], new_kernel_shape)

        # Get kernel (may have been changed to a reshape above).
        kernel_node = node.inputs[1]
        kernel_name = node.input[1]

        # Transpose kernel from (..., C_in, C_out) to (C_out, C_in, ...).
        permutation = [spatial + 1, spatial] + list(range(spatial))

        # If kernel is a constant, transpose that one if we are the only consumer.
        need_transpose = True
        if kernel_node.is_const() and len(ctx.find_output_consumers(kernel_name)) == 1:
            val = kernel_node.get_tensor_value(as_list=False)
            val = np.transpose(val, permutation)

            kernel_node.set_tensor_value(val)
            need_transpose = False

        if need_transpose:
            transpose = ctx.insert_new_node_on_input(node, "Transpose", kernel_name)
            transpose.set_attr("perm", permutation)
            transpose.skip_conversion = True

            new_shape = spatial_map(ctx.get_shape(kernel_name), permutation)
            ctx.set_shape(transpose.output[0], new_shape)

    # Transpose outputs back if needed.
    if is_channels_last(node):
        for idx in output_indices:
            # Make output channels last again by transposing.
            output_name = node.output[idx]
            output_shape = ctx.get_shape(node.output[idx])

            permutation = get_channels_last_permutation(spatial)

            op_name = utils.make_name(node.name)
            transpose = ctx.insert_new_node_on_output("Transpose", output_name, name=op_name)

            transpose.set_attr("perm", permutation)
            transpose.skip_conversion = True

            # Set tensorflow channels last shape as the transpose node shape.
            ctx.set_shape(transpose.output[0], output_shape)

            # Make the current ONNX convolution output shape channels first.
            ctx.set_shape(output_name, make_shape_channels_first(output_shape))

    # NOTE: Not strictly correct as it can also be NCW or NCDHW for example.
    # NOTE: Generally speaking it's channels first.
    node.data_format = "NCHW"
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
def add_padding(ctx, node, kernel_shape, strides, dilations=None, spatial=2):
    """Translate the TF "padding" attribute into ONNX "pads" / "auto_pad".

    For SAME padding, explicit per-dimension pads are computed from the
    (channels-first) input/output shapes; when any spatial dimension is
    unknown (-1) we fall back to auto_pad=SAME_UPPER. VALID needs no pads.

    Args:
        ctx: The parent graph.
        node: Node carrying the TF "padding" attribute.
        kernel_shape: Kernel dims; only the first `spatial` entries are used.
        strides: Spatial strides.
        dilations: Spatial dilations; defaults to all ones.
        spatial: Number of spatial dimensions.

    Raises:
        ValueError: If input/output rank is not spatial + 2, or the padding
            mode is not SAME/VALID.
    """
    padding = node.get_attr("padding")
    if not padding:
        return

    if dilations is None:
        dilations = [1] * spatial

    padding = padding.s.decode("utf-8")
    if padding == "SAME":
        # Initialize with all zeros.
        # Paddings are in (x_begin, y_begin, ..., x_end, y_end, ...) order.
        pads = [0] * (spatial * 2)

        # Get shapes and check whether valid.
        input_shape = ctx.get_shape(node.input[0])
        output_shape = ctx.get_shape(node.output[0])

        if len(input_shape) != spatial + 2:
            # Fixed: this branch validates the INPUT rank (message previously
            # said "output", duplicating the check below).
            raise ValueError(
                "node {} input needs to be rank {}, is {}".format(
                    node.name, spatial + 2, len(input_shape)
                )
            )

        if len(output_shape) != spatial + 2:
            raise ValueError(
                "node {} output needs to be rank {}, is {}".format(
                    node.name, spatial + 2, len(output_shape)
                )
            )

        # Transpose to channels first if not so.
        if is_channels_last(node):
            input_shape = make_shape_channels_first(input_shape)
            output_shape = make_shape_channels_first(output_shape)

        # Check for unknown input/output dimensions. Fall back to auto padding if so.
        if any(input_shape[i + 2] == -1 or output_shape[i + 2] == -1 for i in range(spatial)):
            logger.debug(
                "node %s has unknown dim for pads calculation, fallback to auto_pad: "
                "input_shape=%s, output_shape=%s",
                node.name,
                input_shape,
                output_shape,
            )

            node.set_attr("auto_pad", "SAME_UPPER")
            return

        # Calculate paddings; the end side receives the larger half
        # (SAME_UPPER convention).
        for i in range(spatial):
            pad = (
                (output_shape[i + 2] - 1) * strides[i]
                + dilations[i] * (kernel_shape[i] - 1) + 1
                - input_shape[i + 2]
            )
            pad = max(pad, 0)

            pads[i] = pad // 2
            pads[i + spatial] = pad - pad // 2

        node.set_attr("pads", pads)
    elif padding == "VALID":
        pass
    else:
        raise ValueError("invalid padding value: {}".format(padding))
|
| 254 |
+
|
| 255 |
+
def parse_dims_attr(node, dims, spatial):
    """Extract the `spatial` spatial entries from a TF dims attribute.

    Handles three layouts: channels-first (N, C, ...), channels-last
    (N, ..., C), and an already spatial-only list.
    """
    if not is_channels_last(node):
        # Channels-first (N, C, ...): spatial dims start at index 2.
        return dims[2:]
    if len(dims) == spatial:
        # Already spatial-only; nothing to strip.
        return dims
    # Channels-last (N, ..., C): drop batch and channel dims.
    return dims[1:-1]
|
| 264 |
+
|
| 265 |
+
def conv_dims_attr(node, name, new_name=None, spatial=2):
    """Read attribute `name`, keep only its spatial part, and store it back.

    The result is written under `new_name` (defaults to `name`) and
    returned; returns None when the attribute is absent.
    """
    attr = node.get_attr(name)
    if not attr:
        return None

    spatial_dims = parse_dims_attr(node, attr.ints, spatial)

    target = name if new_name is None else new_name
    node.set_attr(target, spatial_dims)
    return spatial_dims
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
def conv_kernel_shape(ctx, node, input_idx, spatial=2):
    """Record the spatial kernel dims as the node's "kernel_shape" attribute.

    TF kernels are laid out (..., C_in, C_out), so the leading `spatial`
    entries of the weight shape are the spatial extents.

    Raises:
        ValueError: If the kernel rank is not spatial + 2.
    """
    full_shape = ctx.get_shape(node.input[input_idx])
    if len(full_shape) != spatial + 2:
        raise ValueError("kernel rank must be spatial+2")

    spatial_part = full_shape[:spatial]
    node.set_attr("kernel_shape", spatial_part)
    return spatial_part
|
| 297 |
+
|
| 298 |
+
|
| 299 |
+
def build_dynamic_target_size(ctx, transposed_intput, target_hw):
    """Build the target tensor shape for the Resize op.

    Args:
        ctx: the graph context.
        transposed_intput: a tensor of rank 4 laid out as [n, c, h, w].
            (Parameter name kept misspelled for caller compatibility.)
        target_hw: a tensor of rank 2 holding the resize target [nh, nw].

    Returns:
        The node producing a rank-2 tensor containing [n, c, nh, nw].
    """
    # Take the leading [n, c] half from the runtime shape of the input.
    input_shape = ctx.make_node("Shape", [transposed_intput])
    batch_and_channels = GraphBuilder(ctx).make_slice(
        {"data": input_shape.output[0], "ends": [2], "starts": [0]})
    # Concat requires int64 entries, so cast the target size.
    hw_int64 = ctx.make_node("Cast", [target_hw], attr={'to': TensorProto.INT64})
    # Assemble the full [n, c, nh, nw] size tensor.
    return ctx.make_node("Concat", [batch_and_channels, hw_int64.output[0]], {'axis': 0})
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
@tf_op(["Conv1D", "Conv2D", "Conv3D"])
|
| 322 |
+
class ConvOp:
|
| 323 |
+
@classmethod
|
| 324 |
+
def any_version(cls, opset, ctx, node, **kwargs):
|
| 325 |
+
# ONNX specification:
|
| 326 |
+
#
|
| 327 |
+
# T output = Conv2D(T input, T filter, @list(int) strides, @bool use_cudnn_on_gpu,
|
| 328 |
+
# @string padding, @string data_format)
|
| 329 |
+
#
|
| 330 |
+
# T Y = Conv(T X, T W, T B, @AttrType.STRING auto_pad, @AttrType.INTS dilations, @AttrType.INT group,
|
| 331 |
+
# @AttrType.INTS kernel_shape, @AttrType.INTS pads, @AttrType.INTS strides)
|
| 332 |
+
#
|
| 333 |
+
|
| 334 |
+
# Determine number of spatial dimensions.
|
| 335 |
+
spatial = int(node.type[-2])
|
| 336 |
+
|
| 337 |
+
# Make it a convolution node.
|
| 338 |
+
node.type = "Conv"
|
| 339 |
+
|
| 340 |
+
# Determine kernel spatial shape, strides and dilations.
|
| 341 |
+
kernel_shape = conv_kernel_shape(ctx, node, 1, spatial=spatial)
|
| 342 |
+
strides = conv_dims_attr(node, "strides", spatial=spatial)
|
| 343 |
+
dilations = conv_dims_attr(node, "dilations", spatial=spatial)
|
| 344 |
+
|
| 345 |
+
# prefix with batch dim of [1] to satisfy rank requirements
|
| 346 |
+
input_shape = ctx.get_shape(node.input[0])
|
| 347 |
+
if len(input_shape) == spatial + 1:
|
| 348 |
+
gb = GraphBuilder(ctx)
|
| 349 |
+
usq_node = gb.make_unsqueeze({"axes": [0], 'data': node.input[0]}, return_node=True)
|
| 350 |
+
ctx.replace_inputs(node, [usq_node.output[0]] + node.input[1:])
|
| 351 |
+
|
| 352 |
+
# Set padding.
|
| 353 |
+
add_padding(
|
| 354 |
+
ctx, node, kernel_shape, strides, dilations=dilations, spatial=spatial
|
| 355 |
+
)
|
| 356 |
+
|
| 357 |
+
# Convert input and filters.
|
| 358 |
+
conv_convert_inputs(ctx, node, with_kernel=True, spatial=spatial)
|
| 359 |
+
|
| 360 |
+
@classmethod
|
| 361 |
+
def version_1(cls, ctx, node, **kwargs):
|
| 362 |
+
cls.any_version(1, ctx, node, **kwargs)
|
| 363 |
+
|
| 364 |
+
@classmethod
|
| 365 |
+
def version_11(cls, ctx, node, **kwargs):
|
| 366 |
+
# No change.
|
| 367 |
+
cls.any_version(11, ctx, node, **kwargs)
|
| 368 |
+
|
| 369 |
+
@classmethod
|
| 370 |
+
def version_13(cls, ctx, node, **kwargs):
|
| 371 |
+
# Signature change for operator Unsqueeze.
|
| 372 |
+
cls.any_version(13, ctx, node, **kwargs)
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
def get_shape_from_const_or_concat(ctx, node):
    """Try to statically read the shape value produced by `node`.

    Handles two producers: a constant, or a Concat of unsqueezed scalar
    constants (the form a converted Pack takes). The batch entry may be
    unknown and is then reported as -1. Returns None when the value
    cannot be determined.
    """
    if node.is_const():
        return node.get_tensor_value()
    if node.type != 'Concat':
        return None

    # Sometimes the shape is formed by concating a bunch of consts together;
    # every piece must be a rank-1 tensor of length one.
    if any(ctx.get_shape(name) != [1] for name in node.input):
        return None

    dims = []
    for pos, producer in enumerate(node.inputs):
        # The concat is converted from a Pack; conversion adds an Unsqueeze
        # around each scalar input.
        if producer.type == 'Unsqueeze' and producer.inputs[0].is_scalar():
            dims.append(producer.inputs[0].get_tensor_value())
        elif pos == 0:
            # For the batch dimension we don't care if it is unknown.
            dims.append(-1)
        else:
            return None
    return dims
|
| 395 |
+
|
| 396 |
+
@tf_op(["Conv2DBackpropInput", "Conv3DBackpropInputV2"])
|
| 397 |
+
class ConvTranspose:
|
| 398 |
+
@classmethod
|
| 399 |
+
def version_1(cls, ctx, node, **kwargs):
|
| 400 |
+
# T output = Conv2DBackpropInput(int32 input_sizes, T filter, T out_backprop,
|
| 401 |
+
# @list(int) strides, @bool use_cudnn_on_gpu, @string padding, @string data_format, @list(int) dilations)
|
| 402 |
+
# T Y = ConvTranspose(T X, T W, T B, @STRING auto_pad, @INTS dilations,
|
| 403 |
+
# @INT group, @INTS kernel_shape, @INTS output_shape, @INTS pads, @INTS strides)
|
| 404 |
+
|
| 405 |
+
if node.type == "Conv3DBackpropInputV2":
|
| 406 |
+
spatial = 3
|
| 407 |
+
else:
|
| 408 |
+
spatial = 2
|
| 409 |
+
node.type = "ConvTranspose"
|
| 410 |
+
# Note: inputs are reversed from what one would expect.
|
| 411 |
+
conv_kernel_shape(ctx, node, 1, spatial=spatial)
|
| 412 |
+
input_shape = ctx.get_shape(node.input[2])
|
| 413 |
+
output_shape_orig = node.output_shapes
|
| 414 |
+
|
| 415 |
+
# ouput_shape is explicitly specified here, in this case pads values are auto generated/calculated.
|
| 416 |
+
output_shape = get_shape_from_const_or_concat(ctx, node.inputs[0])
|
| 417 |
+
if output_shape is not None:
|
| 418 |
+
#output_shape = ctx.get_shape(node.output[0])
|
| 419 |
+
if is_channels_last(node):
|
| 420 |
+
new_output_shape = [output_shape[1], output_shape[2]]
|
| 421 |
+
input_dims = [input_shape[1], input_shape[2]]
|
| 422 |
+
if spatial == 3:
|
| 423 |
+
new_output_shape.append(output_shape[3])
|
| 424 |
+
input_dims.append(input_shape[3])
|
| 425 |
+
else:
|
| 426 |
+
new_output_shape = [output_shape[2], output_shape[3]]
|
| 427 |
+
input_dims = [input_shape[2], input_shape[3]]
|
| 428 |
+
if spatial == 3:
|
| 429 |
+
new_output_shape.append(output_shape[4])
|
| 430 |
+
input_dims.append(input_shape[4])
|
| 431 |
+
|
| 432 |
+
utils.make_sure(new_output_shape.count(-1) <= 0, "output dims need to be known")
|
| 433 |
+
utils.make_sure(all(new_output_shape[i] >= input_dims[i] for i in range(spatial)),
|
| 434 |
+
"output dims cannot be smaller than input dims.")
|
| 435 |
+
|
| 436 |
+
node.set_attr("output_shape", new_output_shape)
|
| 437 |
+
else:
|
| 438 |
+
utils.make_sure(ctx.opset >= 10, "Opset 10 needed for Conv Backprop Input with non-constant shape")
|
| 439 |
+
strides = parse_dims_attr(node, node.get_attr('strides').ints, spatial)
|
| 440 |
+
use_strides_workaround = any(d > 1 for d in strides)
|
| 441 |
+
if use_strides_workaround and ctx.opset < 12:
|
| 442 |
+
# When strides > 1, ONNX and TF have an implementation difference in ConvTranspose. ONNX outputs a
|
| 443 |
+
# slightly smaller tensor which must be padded with a row of 0s. Pad with dynamic shape requires
|
| 444 |
+
# opset >= 11 and Max of int64 needs opset >= 12. Depending on the output_shape, this row of 0s might
|
| 445 |
+
# be shaved off, in which case TF and ONNX agree. When output_shape is dynamic it is impossible to
|
| 446 |
+
# know at conversion time whether this is the case and the workaround is needed.
|
| 447 |
+
logger.warning("Conv Backprop Input with strides > 1 and non-constant shape has known bug. "
|
| 448 |
+
"Workaround requires opset 12.")
|
| 449 |
+
use_strides_workaround = False
|
| 450 |
+
input_shape = ctx.make_node("Cast", [node.input[0]], attr={'to': TensorProto.INT64})
|
| 451 |
+
output_shape = ctx.make_node("Shape", [node.output[0]])
|
| 452 |
+
output_h = GraphBuilder(ctx).make_slice(
|
| 453 |
+
{"data": output_shape.output[0], "ends": [2], "starts": [1], "axes": [0]})
|
| 454 |
+
output_w = GraphBuilder(ctx).make_slice(
|
| 455 |
+
{"data": output_shape.output[0], "ends": [3], "starts": [2], "axes": [0]})
|
| 456 |
+
expect_h = GraphBuilder(ctx).make_slice(
|
| 457 |
+
{"data": input_shape.output[0], "ends": [2], "starts": [1], "axes": [0]})
|
| 458 |
+
expect_w = GraphBuilder(ctx).make_slice(
|
| 459 |
+
{"data": input_shape.output[0], "ends": [3], "starts": [2], "axes": [0]})
|
| 460 |
+
diff_h = ctx.make_node("Sub", [output_h, expect_h])
|
| 461 |
+
diff_w = ctx.make_node("Sub", [output_w, expect_w])
|
| 462 |
+
nonneg_diff_h = diff_h
|
| 463 |
+
nonneg_diff_w = diff_w
|
| 464 |
+
|
| 465 |
+
if use_strides_workaround:
|
| 466 |
+
const_zero = ctx.make_const(utils.make_name(node.name + "_const_zero"), np.array([0], dtype=np.int64))
|
| 467 |
+
nonneg_diff_h = ctx.make_node("Max", [diff_h.output[0], const_zero.output[0]])
|
| 468 |
+
nonneg_diff_w = ctx.make_node("Max", [diff_w.output[0], const_zero.output[0]])
|
| 469 |
+
|
| 470 |
+
const_two = ctx.make_const(utils.make_name(node.name + "_const_two"), np.array([2], dtype=np.int64))
|
| 471 |
+
start_h = ctx.make_node("Div", [nonneg_diff_h.output[0], const_two.output[0]])
|
| 472 |
+
start_w = ctx.make_node("Div", [nonneg_diff_w.output[0], const_two.output[0]])
|
| 473 |
+
end_h = ctx.make_node("Add", [start_h.output[0], expect_h])
|
| 474 |
+
end_w = ctx.make_node("Add", [start_w.output[0], expect_w])
|
| 475 |
+
if spatial == 3:
|
| 476 |
+
output_d = GraphBuilder(ctx).make_slice(
|
| 477 |
+
{"data": output_shape.output[0], "ends": [4], "starts": [3], "axes": [0]})
|
| 478 |
+
expect_d = GraphBuilder(ctx).make_slice(
|
| 479 |
+
{"data": input_shape.output[0], "ends": [4], "starts": [3], "axes": [0]})
|
| 480 |
+
diff_d = ctx.make_node("Sub", [output_d, expect_d])
|
| 481 |
+
nonneg_diff_d = diff_d
|
| 482 |
+
if use_strides_workaround:
|
| 483 |
+
nonneg_diff_d = ctx.make_node("Max", [diff_d.output[0], const_zero.output[0]])
|
| 484 |
+
start_d = ctx.make_node("Div", [nonneg_diff_d.output[0], const_two.output[0]])
|
| 485 |
+
end_d = ctx.make_node("Add", [start_d.output[0], expect_d])
|
| 486 |
+
|
| 487 |
+
starts = ctx.make_node("Concat", [start_h.output[0], start_w.output[0], start_d.output[0]],
|
| 488 |
+
attr={"axis": 0})
|
| 489 |
+
ends = ctx.make_node("Concat", [end_h.output[0], end_w.output[0], end_d.output[0]], attr={"axis": 0})
|
| 490 |
+
slice_axes = ctx.make_const(utils.make_name(node.name + "_const_slice_axes"),
|
| 491 |
+
np.array([1, 2, 3], dtype=np.int64))
|
| 492 |
+
else:
|
| 493 |
+
starts = ctx.make_node("Concat", [start_h.output[0], start_w.output[0]], attr={"axis": 0})
|
| 494 |
+
ends = ctx.make_node("Concat", [end_h.output[0], end_w.output[0]], attr={"axis": 0})
|
| 495 |
+
slice_axes = ctx.make_const(utils.make_name(node.name + "_const_slice_axes"),
|
| 496 |
+
np.array([1, 2], dtype=np.int64))
|
| 497 |
+
|
| 498 |
+
slice_node = ctx.make_node("Slice",
|
| 499 |
+
[node.output[0], starts.output[0], ends.output[0], slice_axes.output[0]],
|
| 500 |
+
shapes=output_shape_orig)
|
| 501 |
+
|
| 502 |
+
final_node = slice_node
|
| 503 |
+
|
| 504 |
+
if use_strides_workaround:
|
| 505 |
+
cz = const_zero.output[0]
|
| 506 |
+
|
| 507 |
+
neg_diff_h = ctx.make_node("Neg", [diff_h.output[0]])
|
| 508 |
+
shrink_h_by = ctx.make_node("Max", [neg_diff_h.output[0], const_zero.output[0]])
|
| 509 |
+
shb = shrink_h_by.output[0]
|
| 510 |
+
|
| 511 |
+
neg_diff_w = ctx.make_node("Neg", [diff_w.output[0]])
|
| 512 |
+
shrink_w_by = ctx.make_node("Max", [neg_diff_w.output[0], const_zero.output[0]])
|
| 513 |
+
swb = shrink_w_by.output[0]
|
| 514 |
+
|
| 515 |
+
if spatial == 3:
|
| 516 |
+
neg_diff_d = ctx.make_node("Neg", [diff_d.output[0]])
|
| 517 |
+
shrink_d_by = ctx.make_node("Max", [neg_diff_d.output[0], const_zero.output[0]])
|
| 518 |
+
sdb = shrink_d_by.output[0]
|
| 519 |
+
pads = ctx.make_node("Concat", [cz, cz, cz, cz, cz, cz, shb, swb, sdb, cz], attr={"axis": 0})
|
| 520 |
+
padded_node = ctx.make_node("Pad", [slice_node.output[0], pads.output[0]])
|
| 521 |
+
else:
|
| 522 |
+
pads = ctx.make_node("Concat", [cz, cz, cz, cz, cz, shb, swb, cz], attr={"axis": 0})
|
| 523 |
+
padded_node = ctx.make_node("Pad", [slice_node.output[0], pads.output[0]])
|
| 524 |
+
|
| 525 |
+
final_node = padded_node
|
| 526 |
+
|
| 527 |
+
downstream_nodes = ctx.find_output_consumers(node.output[0])
|
| 528 |
+
downstream_nodes.remove(output_shape)
|
| 529 |
+
downstream_nodes.remove(slice_node)
|
| 530 |
+
ctx.replace_all_inputs(node.output[0], final_node.output[0], ops=downstream_nodes)
|
| 531 |
+
|
| 532 |
+
conv_dims_attr(node, "strides", spatial=spatial)
|
| 533 |
+
conv_dims_attr(node, "dilations", spatial=spatial)
|
| 534 |
+
|
| 535 |
+
# remove output_shapes input
|
| 536 |
+
ctx.remove_input(node, node.input[0], 0)
|
| 537 |
+
# swap data and kernel
|
| 538 |
+
t = node.input[0]
|
| 539 |
+
ctx.replace_input(node, node.input[0], node.input[1], 0)
|
| 540 |
+
ctx.replace_input(node, node.input[1], t, 1)
|
| 541 |
+
|
| 542 |
+
conv_convert_inputs(ctx, node, with_kernel=True, spatial=spatial)
|
| 543 |
+
|
| 544 |
+
@classmethod
|
| 545 |
+
def version_11(cls, ctx, node, **kwargs):
|
| 546 |
+
cls.version_1(ctx, node, **kwargs)
|
| 547 |
+
|
| 548 |
+
|
| 549 |
+
@tf_op(["DepthwiseConv2d", "DepthwiseConv2dNative"])
|
| 550 |
+
class DepthwiseConv2d:
|
| 551 |
+
@classmethod
|
| 552 |
+
def version_1(cls, ctx, node, **kwargs):
|
| 553 |
+
# T output = DepthwiseConv2dNative(T input, T filter, @list(int) strides, @string padding, @string data_format)
|
| 554 |
+
# T Y = ConvTranspose(T X, T W, T B, @AttrType.STRING auto_pad, @AttrType.INTS dilations, @AttrType.INT group,
|
| 555 |
+
# @AttrType.INTS kernel_shape, @AttrType.INTS output_shape, @AttrType.INTS pads, @AttrType.INTS strides)
|
| 556 |
+
#
|
| 557 |
+
# this is not documented well in onnx, the hint comes from pytorch documentation:
|
| 558 |
+
# http://pytorch.org/docs/master/nn.html#torch.nn.Conv2d
|
| 559 |
+
# The configuration when groups == in_channels and out_channels = K * in_channels
|
| 560 |
+
# where K is a positive integer is termed in literature as depthwise convolution.
|
| 561 |
+
# In other words, for an input of size (N,Cin,Hin,Win),
|
| 562 |
+
# if you want a depthwise convolution with a depthwise multiplier K,
|
| 563 |
+
# then you use the constructor arguments (in_channels=Cin,out_channels=Cin*K,...,groups=Cin)
|
| 564 |
+
#
|
| 565 |
+
node.type = "Conv"
|
| 566 |
+
input_shape = ctx.get_shape(node.input[0])
|
| 567 |
+
if len(input_shape) != 4:
|
| 568 |
+
raise ValueError("only Conv2D is supported")
|
| 569 |
+
|
| 570 |
+
kernel_shape = ctx.get_shape(node.input[1])
|
| 571 |
+
if len(kernel_shape) != 4:
|
| 572 |
+
raise ValueError("only Conv2D is supported")
|
| 573 |
+
k_h, k_w, k_input_channels, k_channel_multiplier = kernel_shape
|
| 574 |
+
if "depth_multiplier" in node.attr:
|
| 575 |
+
depth_multiplier = node.get_attr_int("depth_multiplier")
|
| 576 |
+
k_input_channels //= depth_multiplier
|
| 577 |
+
k_channel_multiplier *= depth_multiplier
|
| 578 |
+
if k_input_channels < 1:
|
| 579 |
+
raise ValueError("input channel must be positive")
|
| 580 |
+
k_output_channels = k_input_channels * k_channel_multiplier
|
| 581 |
+
|
| 582 |
+
node.set_attr("kernel_shape", [k_h, k_w])
|
| 583 |
+
strides = conv_dims_attr(node, "strides")
|
| 584 |
+
dilations = conv_dims_attr(node, "dilations")
|
| 585 |
+
node.set_attr("group", k_input_channels)
|
| 586 |
+
add_padding(ctx, node, kernel_shape, strides, dilations)
|
| 587 |
+
|
| 588 |
+
new_kernel_shape = [k_h, k_w, 1, k_output_channels]
|
| 589 |
+
conv_convert_inputs(ctx, node, with_kernel=True, new_kernel_shape=new_kernel_shape)
|
| 590 |
+
|
| 591 |
+
|
| 592 |
+
@tf_op(["AvgPool", "AvgPool3D"], onnx_op="AveragePool")
|
| 593 |
+
@tf_op(["MaxPool", "MaxPoolV2", "MaxPool3D"], onnx_op="MaxPool")
|
| 594 |
+
class PoolOp:
|
| 595 |
+
@classmethod
|
| 596 |
+
def version_1(cls, ctx, node, **kwargs):
|
| 597 |
+
cls._convert(ctx, node, **kwargs)
|
| 598 |
+
|
| 599 |
+
@classmethod
|
| 600 |
+
def version_10(cls, ctx, node, **kwargs):
|
| 601 |
+
cls._convert(ctx, node, **kwargs)
|
| 602 |
+
|
| 603 |
+
@classmethod
|
| 604 |
+
def version_11(cls, ctx, node, **kwargs):
|
| 605 |
+
# no change
|
| 606 |
+
cls._convert(ctx, node, **kwargs)
|
| 607 |
+
|
| 608 |
+
@classmethod
|
| 609 |
+
def _convert(cls, ctx, node, **kwargs):
|
| 610 |
+
# T output = MaxPool(T input, @list(int) ksize, @list(int) strides, @string padding, @string data_format)
|
| 611 |
+
# T Y = MaxPool(T X, @AttrType.STRING auto_pad, @AttrType.INTS kernel_shape, @AttrType.INTS pads,
|
| 612 |
+
# @AttrType.INTS strides)
|
| 613 |
+
# above seems wrong - input[1] is ksize, input[2] is strides
|
| 614 |
+
# stride and ksize in tf is not always NHWC, so watch out when converting into onnx's NCHW
|
| 615 |
+
if kwargs["tf_op"] in ["AvgPool3D", "MaxPool3D"]:
|
| 616 |
+
spatial = 3
|
| 617 |
+
else:
|
| 618 |
+
spatial = 2
|
| 619 |
+
|
| 620 |
+
origin_dtype = ctx.get_dtype(node.output[0])
|
| 621 |
+
if origin_dtype not in [onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.DOUBLE]:
|
| 622 |
+
# the onnx spec doesn't allow int types for pool ops
|
| 623 |
+
input_shapes = [ctx.get_shape(node.input[0])]
|
| 624 |
+
output_shapes = [ctx.get_shape(node.output[0])]
|
| 625 |
+
cast_node = ctx.make_node("Cast", [node.input[0]], dtypes=[onnx_pb.TensorProto.FLOAT], shapes=input_shapes,
|
| 626 |
+
name=node.name + "_cast", attr={"to": onnx_pb.TensorProto.FLOAT})
|
| 627 |
+
_ = ctx.insert_node_on_output(cast_node, node.inputs[0].output[0])
|
| 628 |
+
cast_back_node = ctx.make_node("Cast", [node.output[0]], dtypes=[origin_dtype], shapes=output_shapes,
|
| 629 |
+
name=node.name + "_castback", attr={"to": origin_dtype})
|
| 630 |
+
_ = ctx.insert_node_on_output(cast_back_node, node.output[0])
|
| 631 |
+
|
| 632 |
+
if len(node.input) < 3:
|
| 633 |
+
kernel_shape_tf = node.get_attr("ksize").ints
|
| 634 |
+
strides_tf = node.get_attr("strides").ints
|
| 635 |
+
else:
|
| 636 |
+
kernel_shape_tf = node.inputs[1].get_tensor_value()
|
| 637 |
+
strides_tf = node.inputs[2].get_tensor_value()
|
| 638 |
+
ctx.remove_input(node, node.input[2], 2)
|
| 639 |
+
ctx.remove_input(node, node.input[1], 1)
|
| 640 |
+
|
| 641 |
+
kernel_shape_hw = parse_dims_attr(node, kernel_shape_tf, spatial)
|
| 642 |
+
strides_hw = parse_dims_attr(node, strides_tf, spatial)
|
| 643 |
+
|
| 644 |
+
node.set_attr("kernel_shape", kernel_shape_hw)
|
| 645 |
+
node.set_attr("strides", strides_hw)
|
| 646 |
+
dilations = conv_dims_attr(node, "dilations", spatial=spatial)
|
| 647 |
+
add_padding(ctx, node, kernel_shape_hw, strides_hw, dilations=dilations, spatial=spatial)
|
| 648 |
+
conv_convert_inputs(ctx, node, with_kernel=False, spatial=spatial)
|
| 649 |
+
|
| 650 |
+
|
| 651 |
+
@tf_op(["MaxPoolWithArgmax"], onnx_op="MaxPool")
|
| 652 |
+
class MaxPoolWithArgmaxOp:
|
| 653 |
+
@classmethod
|
| 654 |
+
def version_8(cls, ctx, node, **kwargs):
|
| 655 |
+
# T output = MaxPool(T input, @list(int) ksize, @list(int) strides, @string padding, @string data_format)
|
| 656 |
+
|
| 657 |
+
# Set kernel_shape attribute
|
| 658 |
+
kernel_shape = node.get_attr("ksize").ints
|
| 659 |
+
kernel_shape = [kernel_shape[1], kernel_shape[2]]
|
| 660 |
+
node.set_attr("kernel_shape", kernel_shape)
|
| 661 |
+
|
| 662 |
+
# Set strides attribute
|
| 663 |
+
strides = node.get_attr("strides").ints
|
| 664 |
+
strides = [strides[1], strides[2]]
|
| 665 |
+
node.set_attr("strides", strides)
|
| 666 |
+
|
| 667 |
+
# The input data_format is NHWC for TF MaxPoolWithArgmax
|
| 668 |
+
node.set_attr("data_format", "NHWC")
|
| 669 |
+
|
| 670 |
+
add_padding(ctx, node, kernel_shape, strides)
|
| 671 |
+
conv_convert_inputs(ctx, node, with_kernel=False, input_indices=[0], output_indices=[0, 1])
|
| 672 |
+
|
| 673 |
+
|
| 674 |
+
@tf_op(["BiasAdd", "BiasAddV1"])
|
| 675 |
+
class BiasAdd:
|
| 676 |
+
@classmethod
|
| 677 |
+
def version_1(cls, ctx, node, **kwargs):
|
| 678 |
+
# T output = BiasAdd(T value, T bias, @string data_format)
|
| 679 |
+
# T output = BiasAddV1(T value, T bias)
|
| 680 |
+
# TODO: for now use add. We may need to convert to NCHW.
|
| 681 |
+
node.type = "Add"
|
| 682 |
+
common.BroadcastOp.version_1(ctx, node, **kwargs)
|
| 683 |
+
|
| 684 |
+
@classmethod
|
| 685 |
+
def version_7(cls, ctx, node, **kwargs):
|
| 686 |
+
# T output = BiasAdd(T value, T bias, @string data_format)
|
| 687 |
+
# T output = BiasAddV1(T value, T bias)
|
| 688 |
+
# According TF bias_add definition, the input dim is always only 1.
|
| 689 |
+
node.type = "Add"
|
| 690 |
+
common.BroadcastOp.version_6(ctx, node, **kwargs)
|
| 691 |
+
|
| 692 |
+
# on NHWC, bias will broadcast from largest dim, which is default onnx Add op broadcast behavior.
|
| 693 |
+
if not node.is_nhwc():
|
| 694 |
+
# however, in NCHW, bias should be at 2nd dim, which by default onnx Add op has no way to know,
|
| 695 |
+
# so it needs being reshaped into 3-dim tensor before add
|
| 696 |
+
shape0 = ctx.get_shape(node.input[0])
|
| 697 |
+
shape1 = ctx.get_shape(node.input[1])
|
| 698 |
+
if node.inputs[1].type == 'Const' and len(shape1) == 1:
|
| 699 |
+
new_broadcast_shape = [shape1[0]] + [1] * (len(shape0) - 2)
|
| 700 |
+
shape_name = utils.make_name(node.name)
|
| 701 |
+
ctx.make_const(shape_name, np.array(new_broadcast_shape, dtype=np.int64))
|
| 702 |
+
op_name = node.input[1]
|
| 703 |
+
reshape_node = ctx.make_node("Reshape", [op_name, shape_name])
|
| 704 |
+
ctx.replace_input(node, op_name, reshape_node.output[0], 1)
|
| 705 |
+
ctx.set_shape(reshape_node.output[0], new_broadcast_shape)
|
| 706 |
+
|
| 707 |
+
|
| 708 |
+
@tf_op(["Pad", "PadV2", "MirrorPad"], onnx_op="Pad")
|
| 709 |
+
class Pad:
|
| 710 |
+
@classmethod
|
| 711 |
+
def version_1(cls, ctx, node, **kwargs):
|
| 712 |
+
node.type = "Pad"
|
| 713 |
+
# T output = Pad(T input, int32 paddings, @type Tpaddings), CONST model using default value
|
| 714 |
+
# or PadV2(T input, int32 paddings, T constant_value, @type Tpaddings), CONST mode - default value specified
|
| 715 |
+
# or MirrorPad(T input, int32 paddings, @type Tpaddings, @STRING mode), other mode.
|
| 716 |
+
# T output = Pad(T data, @STRING mode, @INTS pads, @FLOAT value)
|
| 717 |
+
paddings = np.array(node.inputs[1].get_tensor_value()).transpose().flatten()
|
| 718 |
+
mode = node.get_attr("mode")
|
| 719 |
+
if mode:
|
| 720 |
+
mode = mode.s.decode("utf-8").lower()
|
| 721 |
+
node.set_attr("mode", mode)
|
| 722 |
+
if mode not in [None, "constant", "reflect"]:
|
| 723 |
+
raise ValueError(mode + " pad mode is not supported")
|
| 724 |
+
|
| 725 |
+
if mode in [None, "constant"] and len(node.input) == 3:
|
| 726 |
+
const_val = node.inputs[2].get_tensor_value()
|
| 727 |
+
node.set_attr("value", const_val)
|
| 728 |
+
ctx.remove_input(node, node.input[2], 2)
|
| 729 |
+
|
| 730 |
+
ctx.remove_input(node, node.input[1], 1)
|
| 731 |
+
node.set_attr("pads", paddings)
|
| 732 |
+
|
| 733 |
+
origin_dtype = ctx.get_dtype(node.output[0])
|
| 734 |
+
if origin_dtype not in [onnx_pb.TensorProto.FLOAT16, onnx_pb.TensorProto.FLOAT,
|
| 735 |
+
onnx_pb.TensorProto.DOUBLE]:
|
| 736 |
+
cast_node = ctx.insert_new_node_on_input(node, "Cast", node.input[0], to=onnx_pb.TensorProto.FLOAT)
|
| 737 |
+
ctx.set_dtype(cast_node.output[0], onnx_pb.TensorProto.FLOAT)
|
| 738 |
+
ctx.copy_shape(node.name, cast_node.output[0])
|
| 739 |
+
|
| 740 |
+
cast_back_node = ctx.insert_new_node_on_output("Cast", node.output[0],
|
| 741 |
+
name=utils.make_name(node.name) + "_castback",
|
| 742 |
+
to=origin_dtype)
|
| 743 |
+
ctx.set_dtype(cast_back_node.output[0], origin_dtype)
|
| 744 |
+
ctx.copy_shape(node.name, cast_back_node.output[0])
|
| 745 |
+
|
| 746 |
+
@classmethod
|
| 747 |
+
def version_11(cls, ctx, node, **kwargs):
|
| 748 |
+
mode = node.get_attr("mode")
|
| 749 |
+
if mode:
|
| 750 |
+
mode = mode.s.decode("utf-8").lower()
|
| 751 |
+
node.set_attr("mode", mode)
|
| 752 |
+
if mode not in [None, "constant", "reflect"]:
|
| 753 |
+
raise ValueError(mode + " pad mode is not supported")
|
| 754 |
+
|
| 755 |
+
# pads must be int64.
|
| 756 |
+
if ctx.get_dtype(node.input[1]) != onnx_pb.TensorProto.INT64:
|
| 757 |
+
ctx.insert_new_node_on_input(node, "Cast", node.input[1], to=onnx_pb.TensorProto.INT64)
|
| 758 |
+
ctx.insert_new_node_on_input(node, "Transpose", node.input[1])
|
| 759 |
+
shape_const = ctx.make_const(utils.make_name(node.name), np.array([-1]).astype(np.int64))
|
| 760 |
+
ctx.insert_new_node_on_input(node, "Reshape", [node.input[1], shape_const.name])
|
| 761 |
+
|
| 762 |
+
origin_dtype = ctx.get_dtype(node.output[0])
|
| 763 |
+
if origin_dtype not in [TensorProto.FLOAT, TensorProto.DOUBLE,
|
| 764 |
+
TensorProto.INT32, TensorProto.INT64]:
|
| 765 |
+
cast_node = ctx.insert_new_node_on_input(node, "Cast", node.input[0], to=TensorProto.FLOAT)
|
| 766 |
+
ctx.set_dtype(cast_node.output[0], TensorProto.FLOAT)
|
| 767 |
+
ctx.copy_shape(node.name, cast_node.output[0])
|
| 768 |
+
|
| 769 |
+
cast_back_node = ctx.insert_new_node_on_output("Cast", node.output[0],
|
| 770 |
+
name=utils.make_name(node.name) + "_castback",
|
| 771 |
+
to=origin_dtype)
|
| 772 |
+
ctx.set_dtype(cast_back_node.output[0], origin_dtype)
|
| 773 |
+
ctx.copy_shape(node.name, cast_back_node.output[0])
|
| 774 |
+
|
| 775 |
+
|
| 776 |
+
@tf_op(["FusedBatchNorm", "FusedBatchNormV2", "FusedBatchNormV3"])
|
| 777 |
+
class BatchNorm:
|
| 778 |
+
@classmethod
|
| 779 |
+
def version_6(cls, ctx, node, **kwargs):
|
| 780 |
+
tf_type = node.type
|
| 781 |
+
node.type = "BatchNormalization"
|
| 782 |
+
# tf inputs: x, scale, bias, mean, variance
|
| 783 |
+
# tf outputs: y, batch_mean, batch_var
|
| 784 |
+
# a: data_format, epsilon, is_training
|
| 785 |
+
# onnx inputs: X, scale, B, mean, variance, attributes: epsilon, momentum=0.9, spatial : 1
|
| 786 |
+
# output: y, mean, var, savedmean, savedvar,
|
| 787 |
+
# detach unused outputs. While we could let the unused outputs dangle,
|
| 788 |
+
# some runtimes like pytorch/caffe2 do complain about it.
|
| 789 |
+
|
| 790 |
+
# onnx batchnorm requires same T for all inputs
|
| 791 |
+
mean_type = ctx.get_dtype(node.input[3])
|
| 792 |
+
x_dtype = ctx.get_dtype(node.input[0])
|
| 793 |
+
if x_dtype != mean_type:
|
| 794 |
+
# TODO: this works but more efficient would be to flip the other inputs. We'd need to check
|
| 795 |
+
# TODO: first if this works with the onnx implementation so its a later for now
|
| 796 |
+
ctx.insert_new_node_on_input(node, "Cast", node.input[0], to=mean_type)
|
| 797 |
+
# casting the input[0] will change the output dtype of bn so we need to cast back
|
| 798 |
+
cast_back_node = ctx.insert_new_node_on_output("Cast", node.output[0],
|
| 799 |
+
name=utils.make_name(node.name) + "_castback",
|
| 800 |
+
to=x_dtype)
|
| 801 |
+
ctx.set_dtype(cast_back_node.output[0], x_dtype)
|
| 802 |
+
ctx.copy_shape(node.name, cast_back_node.output[0])
|
| 803 |
+
|
| 804 |
+
consumers = [ctx.find_output_consumers(output_name) for output_name in node.output[1:]]
|
| 805 |
+
if not any(consumers):
|
| 806 |
+
new_output = [node.output[0]]
|
| 807 |
+
# the setter makes a copy of new_output
|
| 808 |
+
node.output = new_output
|
| 809 |
+
|
| 810 |
+
conv_convert_inputs(ctx, node, with_kernel=False)
|
| 811 |
+
|
| 812 |
+
inp_shape = ctx.get_shape(node.input[0])
|
| 813 |
+
inp_rank = len(inp_shape) if inp_shape is not None else None
|
| 814 |
+
scale_shape = ctx.get_shape(node.input[1])
|
| 815 |
+
mean_shape = ctx.get_shape(node.input[3])
|
| 816 |
+
var_shape = ctx.get_shape(node.input[4])
|
| 817 |
+
val_type = utils.map_onnx_to_numpy_type(ctx.get_dtype(node.input[1]))
|
| 818 |
+
is_training = node.get_attr_value('is_training', True)
|
| 819 |
+
|
| 820 |
+
if is_training and node.get_attr_value('exponential_avg_factor', 1.0) == 1.0:
|
| 821 |
+
# Sometimes TF uses a BatchNorm op with training = True and exponential_avg_factor = 1.0
|
| 822 |
+
# to perform layer mean/variance normalization. In such cases, the mean/var are computed from the input.
|
| 823 |
+
# TF allows mean/variance to be excluded only if is_training and exponential_avg_factor == 1.0
|
| 824 |
+
utils.make_sure(inp_rank is not None, "Cannot convert node %s of type %s with input of unknown rank.",
|
| 825 |
+
node.name, tf_type)
|
| 826 |
+
dims = [0] + list(range(2, inp_rank))
|
| 827 |
+
avg = ctx.make_node("ReduceMean", [node.input[0]], attr={'axes': dims, 'keepdims': True}).output[0]
|
| 828 |
+
avg_squeezed = GraphBuilder(ctx).make_squeeze({"data": avg, "axes": dims})
|
| 829 |
+
sub = ctx.make_node("Sub", [node.input[0], avg]).output[0]
|
| 830 |
+
var_squeezed = ctx.make_node("ReduceSumSquare", [sub], attr={'axes': dims, 'keepdims': False}).output[0]
|
| 831 |
+
|
| 832 |
+
inp_shape = ctx.make_node("Shape", [node.input[0]]).output[0]
|
| 833 |
+
dims_const = ctx.make_const(utils.make_name("axes_const"), np.array(dims, dtype=np.int64)).output[0]
|
| 834 |
+
reduce_dims = ctx.make_node("Gather", [inp_shape, dims_const]).output[0]
|
| 835 |
+
dims_product = ctx.make_node("ReduceProd", [reduce_dims], attr={'axes': [0], 'keepdims': False})
|
| 836 |
+
cnt_float = ctx.make_node("Cast", [dims_product.output[0]], attr={'to': ctx.get_dtype(node.input[0])})
|
| 837 |
+
|
| 838 |
+
pop_var_squeezed = ctx.make_node("Div", [var_squeezed, cnt_float.output[0]]).output[0]
|
| 839 |
+
ctx.replace_inputs(node, node.input[:3] + [avg_squeezed, pop_var_squeezed])
|
| 840 |
+
elif is_training:
|
| 841 |
+
logger.warning("Node %s of type %s has is_training set to true, which is not supperted. "
|
| 842 |
+
"Please re-save the model with training set to false.",
|
| 843 |
+
node.name, tf_type)
|
| 844 |
+
# As long as the mean/variance estimates are provided, we should be OK
|
| 845 |
+
is_training = False
|
| 846 |
+
|
| 847 |
+
if not is_training and mean_shape != scale_shape and all(d >= 0 for d in scale_shape):
|
| 848 |
+
new_mean_value = np.array(np.resize(node.inputs[3].get_tensor_value(as_list=False), scale_shape),
|
| 849 |
+
dtype=val_type)
|
| 850 |
+
new_mean_node_name = utils.make_name(node.name)
|
| 851 |
+
ctx.make_const(new_mean_node_name, new_mean_value)
|
| 852 |
+
ctx.replace_input(node, node.input[3], new_mean_node_name, 3)
|
| 853 |
+
|
| 854 |
+
if not is_training and var_shape != scale_shape and all(d >= 0 for d in scale_shape):
|
| 855 |
+
new_var_value = np.array(np.resize(node.inputs[4].get_tensor_value(as_list=False), scale_shape),
|
| 856 |
+
dtype=val_type)
|
| 857 |
+
new_val_node_name = utils.make_name(node.name)
|
| 858 |
+
ctx.make_const(new_val_node_name, new_var_value)
|
| 859 |
+
ctx.replace_input(node, node.input[4], new_val_node_name, 4)
|
| 860 |
+
|
| 861 |
+
@classmethod
|
| 862 |
+
def version_9(cls, ctx, node, **kwargs):
|
| 863 |
+
# is_test was removed - no change for us
|
| 864 |
+
cls.version_6(ctx, node, **kwargs)
|
| 865 |
+
|
| 866 |
+
|
| 867 |
+
@tf_op(["SpaceToDepth"])
|
| 868 |
+
class SpaceToDepth:
|
| 869 |
+
@classmethod
|
| 870 |
+
def version_1(cls, ctx, node, **kwargs):
|
| 871 |
+
block_size = node.get_attr("block_size")
|
| 872 |
+
node.set_attr("blocksize", block_size.i)
|
| 873 |
+
conv_convert_inputs(ctx, node, with_kernel=False)
|
| 874 |
+
|
| 875 |
+
|
| 876 |
+
@tf_op(["DepthToSpace"])
|
| 877 |
+
class DepthToSpace:
|
| 878 |
+
@classmethod
|
| 879 |
+
def version_1(cls, ctx, node, **kwargs):
|
| 880 |
+
block_size = node.get_attr("block_size")
|
| 881 |
+
node.set_attr("blocksize", block_size.i)
|
| 882 |
+
conv_convert_inputs(ctx, node, with_kernel=False)
|
| 883 |
+
|
| 884 |
+
@classmethod
|
| 885 |
+
def version_11(cls, ctx, node, **kwargs):
|
| 886 |
+
# Onnx-11 CRD mode added. No change for tf2onnx
|
| 887 |
+
cls.version_1(ctx, node, **kwargs)
|
| 888 |
+
|
| 889 |
+
|
| 890 |
+
@tf_op(["CropAndResize"])
|
| 891 |
+
class CropAndResize:
|
| 892 |
+
@classmethod
|
| 893 |
+
def version_10(cls, ctx, node, **kwargs):
|
| 894 |
+
utils.make_sure(node.inputs[1].type == "Const", "boxes input must be a Const")
|
| 895 |
+
utils.make_sure(node.inputs[3].type == "Const", "boxes input must be a Const")
|
| 896 |
+
name = node.name
|
| 897 |
+
output_height = node.inputs[3].get_tensor_value()[0]
|
| 898 |
+
output_width = node.inputs[3].get_tensor_value()[1]
|
| 899 |
+
rois = node.inputs[1].get_tensor_value()
|
| 900 |
+
rois_shape = ctx.get_shape(node.input[1])
|
| 901 |
+
img_shape = ctx.get_shape(node.input[0])
|
| 902 |
+
transform_rois = np.zeros(list(rois_shape), dtype=np.float32)
|
| 903 |
+
for i in range(rois_shape[0]):
|
| 904 |
+
y1, x1, y2, x2 = rois[i]
|
| 905 |
+
y1 = y1 * (img_shape[1] - 1)
|
| 906 |
+
y2 = y2 * (img_shape[1] - 1)
|
| 907 |
+
x1 = x1 * (img_shape[2] - 1)
|
| 908 |
+
x2 = x2 * (img_shape[2] - 1)
|
| 909 |
+
spacing_h = (y2 - y1)
|
| 910 |
+
spacing_w = (x2 - x1)
|
| 911 |
+
b1 = y1 - 0.5 * spacing_h / (output_height - 1)
|
| 912 |
+
a1 = x1 - 0.5 * spacing_w / (output_width - 1)
|
| 913 |
+
b2 = y2 + 0.5 * spacing_h / (output_height - 1)
|
| 914 |
+
a2 = x2 + 0.5 * spacing_w / (output_width - 1)
|
| 915 |
+
transform_rois[i][0] = a1
|
| 916 |
+
transform_rois[i][1] = b1
|
| 917 |
+
transform_rois[i][2] = a2
|
| 918 |
+
transform_rois[i][3] = b2
|
| 919 |
+
cast_node = ctx.make_node("Cast", [node.input[2]], attr={"to": onnx_pb.TensorProto.INT64})
|
| 920 |
+
bbox_node = ctx.make_const(utils.make_name("bbox"), transform_rois)
|
| 921 |
+
dtypes = [ctx.get_dtype(node.output[0])]
|
| 922 |
+
shapes = [ctx.get_shape(node.output[0])]
|
| 923 |
+
input_nchw = ctx.make_node("Transpose", [node.input[0]], {"perm": [0, 3, 1, 2]},
|
| 924 |
+
name=utils.make_name(node.name))
|
| 925 |
+
crop_and_resize = ctx.make_node("RoiAlign", inputs=[input_nchw.output[0], bbox_node.output[0],
|
| 926 |
+
cast_node.output[0]],
|
| 927 |
+
attr={"output_height": output_height, "output_width": output_width,
|
| 928 |
+
"spatial_scale": 1.0, "sampling_ratio": 1},
|
| 929 |
+
name=utils.make_name(node.name), dtypes=dtypes, shapes=shapes)
|
| 930 |
+
ctx.remove_node(name)
|
| 931 |
+
ctx.make_node("Transpose", crop_and_resize.output, {"perm": [0, 2, 3, 1]},
|
| 932 |
+
name=name, outputs=node.output, shapes=shapes, dtypes=dtypes)
|
| 933 |
+
|
| 934 |
+
@classmethod
|
| 935 |
+
def any_version_after11(cls, opset, ctx, node, **kwargs):
|
| 936 |
+
# create loop of resize to cater to tensorflow CropAndResize, one box one iteration
|
| 937 |
+
mode = "nearest" if node.get_attr("method") is not None and node.get_attr(
|
| 938 |
+
"method").s == b"nearest" else "linear"
|
| 939 |
+
extrapolation_value = float(node.get_attr("extrapolation_value", "0").f)
|
| 940 |
+
input_x = node.input[0]
|
| 941 |
+
boxes = node.input[1]
|
| 942 |
+
box_ind = node.input[2]
|
| 943 |
+
crop_size = node.input[3]
|
| 944 |
+
trip_name = utils.make_name(node.name + "_i")
|
| 945 |
+
cond_name = utils.make_name(node.name + "_cond")
|
| 946 |
+
cond_out_name = utils.make_name(node.name + "cond_out")
|
| 947 |
+
g = ctx.create_new_graph_with_same_config()
|
| 948 |
+
g.add_graph_input(trip_name, TensorProto.INT64, [1])
|
| 949 |
+
g.add_graph_input(cond_name, TensorProto.BOOL, [])
|
| 950 |
+
g.parent_graph = ctx
|
| 951 |
+
const_zero = g.make_const(utils.make_name(node.name + "_const_zero"), np.array([0], dtype=np.int32))
|
| 952 |
+
const_zero_long = g.make_const(utils.make_name(node.name + "_const_zero_long"), np.array([0], dtype=np.int64))
|
| 953 |
+
const_one = g.make_const(utils.make_name(node.name + "_const_one"), np.array([1], dtype=np.int32))
|
| 954 |
+
const_one_long = g.make_const(utils.make_name(node.name + "_const_one_long"), np.array([1], dtype=np.int64))
|
| 955 |
+
index_end = g.make_node("Add", [trip_name, const_one_long.output[0]])
|
| 956 |
+
box_index_from = g.make_node("Slice", [box_ind, trip_name, index_end.output[0]], name="Slice_a")
|
| 957 |
+
box_index_to = g.make_node("Add", [box_index_from.output[0], const_one.output[0]])
|
| 958 |
+
target_x = g.make_node("Slice", [input_x, box_index_from.output[0], box_index_to.output[0],
|
| 959 |
+
const_zero.output[0]], name="Slice_b")
|
| 960 |
+
transposed_x = g.make_node("Transpose", [target_x.output[0]], attr={'perm': constants.NHWC_TO_NCHW})
|
| 961 |
+
const_zero_zero = g.make_const(utils.make_name(node.name + "_const_zero_zero"),
|
| 962 |
+
np.array([0, 0], dtype=np.float32))
|
| 963 |
+
const_one_one = g.make_const(utils.make_name(node.name + "_const_one_one"),
|
| 964 |
+
np.array([1, 1], dtype=np.float32))
|
| 965 |
+
const_four = g.make_const(utils.make_name(node.name + "_const_four"), np.array([4], dtype=np.int64))
|
| 966 |
+
const_empty_float = g.make_const(utils.make_name("const_empty_float"), np.array([], dtype=np.float32))
|
| 967 |
+
box = g.make_node("Slice", [boxes, trip_name, index_end.output[0], const_zero_long.output[0]],
|
| 968 |
+
name="Slice_c")
|
| 969 |
+
roi_raw = g.make_node("Reshape", [box.output[0], const_four.output[0]])
|
| 970 |
+
roi_raw_first_half = GraphBuilder(g).make_slice({"data": roi_raw.output[0], "ends": [2], "starts": [0]})
|
| 971 |
+
roi_raw_second_half = GraphBuilder(g).make_slice({"data": roi_raw.output[0], "ends": [4], "starts": [2]})
|
| 972 |
+
roi_concat_1 = g.make_node("Concat", [const_zero_zero.output[0], roi_raw_first_half], attr={'axis': 0})
|
| 973 |
+
roi_concat_2 = g.make_node("Concat", [const_one_one.output[0], roi_raw_second_half], attr={'axis': 0})
|
| 974 |
+
final_roi = g.make_node("Concat", [roi_concat_1.output[0], roi_concat_2.output[0]], attr={'axis': 0})
|
| 975 |
+
final_crop_size = build_dynamic_target_size(g, transposed_x.output[0], crop_size)
|
| 976 |
+
resized_x = g.make_node("Resize", [transposed_x.output[0], final_roi.output[0], const_empty_float.output[0],
|
| 977 |
+
final_crop_size.output[0]],
|
| 978 |
+
attr={"mode": mode, "extrapolation_value": extrapolation_value,
|
| 979 |
+
"coordinate_transformation_mode": "tf_crop_and_resize"})
|
| 980 |
+
recovered_x = g.make_node("Transpose", [resized_x.output[0]], attr={'perm': constants.NCHW_TO_NHWC})
|
| 981 |
+
squeeze_x = GraphBuilder(g).make_squeeze({'data': recovered_x.output[0], 'axes': [0]}, return_node=True)
|
| 982 |
+
g.make_node("Identity", [cond_name], outputs=[cond_out_name])
|
| 983 |
+
g.add_graph_output(cond_out_name, TensorProto.BOOL, [])
|
| 984 |
+
g.add_graph_output(squeeze_x.output[0], ctx.get_dtype(node.input[0]), [-1, -1, -1])
|
| 985 |
+
trip_node = ctx.make_node("Size", [box_ind])
|
| 986 |
+
cond_const = ctx.make_const(utils.make_name("cond"), np.ones((), dtype=np.bool))
|
| 987 |
+
ctx.remove_node(node.name)
|
| 988 |
+
branches = {"body": g}
|
| 989 |
+
inner_loop = ctx.make_node("Loop", [trip_node.output[0], cond_const.output[0]], name=node.name,
|
| 990 |
+
outputs=node.output, branches=branches)
|
| 991 |
+
|
| 992 |
+
@classmethod
|
| 993 |
+
def version_11(cls, ctx, node, **kwargs):
|
| 994 |
+
cls.any_version_after11(11, ctx, node, **kwargs)
|
| 995 |
+
|
| 996 |
+
@classmethod
|
| 997 |
+
def version_13(cls, ctx, node, **kwargs):
|
| 998 |
+
# Signature of operator Squeeze changed.
|
| 999 |
+
cls.any_version_after11(13, ctx, node, **kwargs)
|
| 1000 |
+
|
| 1001 |
+
|
| 1002 |
+
@tf_op(["ResizeBilinear", "ResizeNearestNeighbor", "ResizeBicubic"])
|
| 1003 |
+
class Resize:
|
| 1004 |
+
@classmethod
|
| 1005 |
+
def version_7(cls, ctx, node, **kwargs):
|
| 1006 |
+
utils.make_sure(node.type != "ResizeBicubic", "Opset 11 is required for bicubic interpolation for node %s",
|
| 1007 |
+
node.name)
|
| 1008 |
+
mode = "linear" if node.type == "ResizeBilinear" else "nearest"
|
| 1009 |
+
node.type = "Upsample"
|
| 1010 |
+
shape = ctx.get_shape(node.input[0])
|
| 1011 |
+
target_shape = node.inputs[1].get_tensor_value()
|
| 1012 |
+
# https://www.tensorflow.org/api_docs/python/tf/image/resize_nearest_neighbor
|
| 1013 |
+
# wants the input to be NHWC - adjust target_shape to this.
|
| 1014 |
+
n, h, w, c = shape
|
| 1015 |
+
nh, nw = target_shape
|
| 1016 |
+
utils.make_sure(all(i != -1 for i in [nh, nw]), "h and w need to be known")
|
| 1017 |
+
# scaler is nchw
|
| 1018 |
+
scaler = [1., 1., float(nh) / h, float(nw) / w]
|
| 1019 |
+
node.set_attr("scales", scaler)
|
| 1020 |
+
node.set_attr("mode", mode)
|
| 1021 |
+
ctx.remove_input(node, node.input[1], 1)
|
| 1022 |
+
node.data_format = "NHWC"
|
| 1023 |
+
conv_convert_inputs(ctx, node, with_kernel=False)
|
| 1024 |
+
|
| 1025 |
+
@classmethod
|
| 1026 |
+
def version_9(cls, ctx, node, **kwargs):
|
| 1027 |
+
cls._convert_since_9(ctx, node, op_type="Upsample")
|
| 1028 |
+
|
| 1029 |
+
@classmethod
|
| 1030 |
+
def version_10(cls, ctx, node, **kwargs):
|
| 1031 |
+
cls._convert_since_9(ctx, node, op_type="Resize")
|
| 1032 |
+
|
| 1033 |
+
@classmethod
|
| 1034 |
+
def version_11(cls, ctx, node, **kwargs):
|
| 1035 |
+
cubic_coeff_a = None
|
| 1036 |
+
exclude_outside = False
|
| 1037 |
+
if node.type == "ResizeBilinear":
|
| 1038 |
+
mode = "linear"
|
| 1039 |
+
elif node.type == "ResizeBicubic":
|
| 1040 |
+
mode = "cubic"
|
| 1041 |
+
cubic_coeff_a = -0.5
|
| 1042 |
+
exclude_outside = True
|
| 1043 |
+
else:
|
| 1044 |
+
mode = "nearest"
|
| 1045 |
+
roi = ctx.make_const(utils.make_name("roi"), np.array([]).astype(np.float32))
|
| 1046 |
+
const_zero = ctx.make_const(utils.make_name("const_zero"), np.array([0]).astype(np.int64))
|
| 1047 |
+
const_two = ctx.make_const(utils.make_name("const_two"), np.array([2]).astype(np.int64))
|
| 1048 |
+
const_empty_float = ctx.make_const(utils.make_name("const_empty_float"), np.array([]).astype(np.float32))
|
| 1049 |
+
input_nchw = ctx.make_node("Transpose", [node.input[0]], {"perm": constants.NHWC_TO_NCHW})
|
| 1050 |
+
shape_input = ctx.make_node("Shape", [input_nchw.output[0]])
|
| 1051 |
+
sliced_shape = ctx.make_node("Slice", [shape_input.output[0], const_zero.output[0], const_two.output[0]])
|
| 1052 |
+
size_int64 = ctx.make_node("Cast", [node.input[1]], attr={"to": onnx_pb.TensorProto.INT64})
|
| 1053 |
+
concat_shape = ctx.make_node("Concat", [sliced_shape.output[0], size_int64.output[0]], {'axis': 0})
|
| 1054 |
+
resize_inputs = [
|
| 1055 |
+
input_nchw.output[0],
|
| 1056 |
+
roi.output[0],
|
| 1057 |
+
const_empty_float.output[0],
|
| 1058 |
+
concat_shape.output[0]
|
| 1059 |
+
]
|
| 1060 |
+
transformation_mode = "asymmetric"
|
| 1061 |
+
nearest_mode = "floor"
|
| 1062 |
+
if "align_corners" in node.attr and node.attr["align_corners"].i:
|
| 1063 |
+
transformation_mode = "align_corners"
|
| 1064 |
+
if "half_pixel_centers" in node.attr and node.attr["half_pixel_centers"].i:
|
| 1065 |
+
if node.type == "ResizeNearestNeighbor" and not ctx.is_target(constants.TARGET_TENSORRT):
|
| 1066 |
+
# TensorRT only supports nearest_mode = "floor" for mode = "nearest"
|
| 1067 |
+
transformation_mode = "half_pixel"
|
| 1068 |
+
nearest_mode = "round_prefer_ceil"
|
| 1069 |
+
else:
|
| 1070 |
+
transformation_mode = "half_pixel"
|
| 1071 |
+
attr = {"mode": mode, "nearest_mode": nearest_mode, "coordinate_transformation_mode": transformation_mode,
|
| 1072 |
+
"exclude_outside": exclude_outside}
|
| 1073 |
+
if cubic_coeff_a is not None:
|
| 1074 |
+
attr["cubic_coeff_a"] = cubic_coeff_a
|
| 1075 |
+
resize = ctx.make_node("Resize", resize_inputs, attr=attr)
|
| 1076 |
+
shapes = node.output_shapes
|
| 1077 |
+
dtypes = node.output_dtypes
|
| 1078 |
+
ctx.remove_node(node.name)
|
| 1079 |
+
ctx.make_node("Transpose", resize.output, {"perm": constants.NCHW_TO_NHWC},
|
| 1080 |
+
name=node.name, outputs=node.output, shapes=shapes, dtypes=dtypes)
|
| 1081 |
+
|
| 1082 |
+
@classmethod
|
| 1083 |
+
def _convert_since_9(cls, ctx, node, op_type, use_target_size=False):
|
| 1084 |
+
|
| 1085 |
+
# float32 out = ResizeBilinear/ResizeNearestNeighbor(T images, int size)
|
| 1086 |
+
# https://www.tensorflow.org/api_docs/python/tf/image/resize_nearest_neighbor
|
| 1087 |
+
# wants the input to be NHWC - adjust target_shape to this.
|
| 1088 |
+
utils.make_sure(node.type != "ResizeBicubic", "Opset 11 is required for bicubic interpolation for node %s",
|
| 1089 |
+
node.name)
|
| 1090 |
+
mode = "linear" if node.type == "ResizeBilinear" else "nearest"
|
| 1091 |
+
|
| 1092 |
+
# because onnxruntime only supports to scale the last two dims so transpose is inserted
|
| 1093 |
+
input_nchw = ctx.make_node("Transpose", [node.input[0]], {"perm": constants.NHWC_TO_NCHW})
|
| 1094 |
+
if use_target_size:
|
| 1095 |
+
final_target_size = build_dynamic_target_size(ctx, input_nchw.output[0], node.input[1])
|
| 1096 |
+
roi = ctx.make_const(utils.make_name("roi"), np.array([]).astype(np.float32))
|
| 1097 |
+
const_empty_float = ctx.make_const(utils.make_name("const_empty_float"), np.array([], dtype=np.float32))
|
| 1098 |
+
resize_inputs = [
|
| 1099 |
+
input_nchw.output[0],
|
| 1100 |
+
roi.output[0],
|
| 1101 |
+
const_empty_float.output[0],
|
| 1102 |
+
final_target_size.output[0]
|
| 1103 |
+
]
|
| 1104 |
+
upsample = ctx.make_node("Resize", resize_inputs,
|
| 1105 |
+
attr={"mode": mode, "nearest_mode": "floor",
|
| 1106 |
+
"coordinate_transformation_mode": "asymmetric"})
|
| 1107 |
+
else:
|
| 1108 |
+
# first create "scales" info for onnx upsample
|
| 1109 |
+
# if shape of input and output known then "scale" is calculated statically and set as a const node
|
| 1110 |
+
shape = ctx.get_shape(node.input[0])
|
| 1111 |
+
if shape and shape[2] != -1 and shape[1] != -1 and node.inputs[1].is_const():
|
| 1112 |
+
target_shape = node.inputs[1].get_tensor_value()
|
| 1113 |
+
n, h, w, c = shape
|
| 1114 |
+
nh, nw = target_shape
|
| 1115 |
+
# scales is nchw
|
| 1116 |
+
# the reason not storing data at raw field is because of the bug:
|
| 1117 |
+
# https://github.com/onnx/onnx/issues/1852
|
| 1118 |
+
scale_val = np.array([1.0, 1.0, float(nh) / h, float(nw) / w]).astype(np.float32)
|
| 1119 |
+
scales = ctx.make_const(utils.make_name("scales"), scale_val, raw=False)
|
| 1120 |
+
else:
|
| 1121 |
+
ori_shape = ctx.make_node("Shape", [node.input[0]])
|
| 1122 |
+
attr = {"axes": [0], "starts": [1], "ends": [3]}
|
| 1123 |
+
inputs_map = {"data": ori_shape.output[0], **attr}
|
| 1124 |
+
ori_shape_hw = GraphBuilder(ctx).make_slice(inputs_map)
|
| 1125 |
+
ori_shape_hw_float = ctx.make_node("Cast", [ori_shape_hw], attr={"to": onnx_pb.TensorProto.FLOAT})
|
| 1126 |
+
|
| 1127 |
+
target_hw = node.inputs[1]
|
| 1128 |
+
target_hw_float = ctx.make_node("Cast", target_hw.output, attr={"to": onnx_pb.TensorProto.FLOAT})
|
| 1129 |
+
|
| 1130 |
+
scales_hw = ctx.make_node("Div", [target_hw_float.output[0], ori_shape_hw_float.output[0]])
|
| 1131 |
+
|
| 1132 |
+
const_one_array = ctx.make_const(utils.make_name("one"), np.array([1.0, 1.0]).astype(np.float32))
|
| 1133 |
+
# scales is nchw
|
| 1134 |
+
scales = ctx.make_node("Concat", [const_one_array.output[0], scales_hw.output[0]], {"axis": 0})
|
| 1135 |
+
upsample = ctx.make_node(op_type, [input_nchw.output[0], scales.output[0]], attr={"mode": mode})
|
| 1136 |
+
|
| 1137 |
+
shapes = node.output_shapes
|
| 1138 |
+
dtypes = node.output_dtypes
|
| 1139 |
+
ctx.remove_node(node.name)
|
| 1140 |
+
ctx.make_node("Transpose", upsample.output, {"perm": constants.NCHW_TO_NHWC},
|
| 1141 |
+
name=node.name, outputs=node.output, shapes=shapes, dtypes=dtypes)
|
| 1142 |
+
|
| 1143 |
+
|
| 1144 |
+
@tf_op("AdjustContrastv2")
|
| 1145 |
+
class AdjustContrastv2:
|
| 1146 |
+
@classmethod
|
| 1147 |
+
def version_1(cls, ctx, node, **kwargs):
|
| 1148 |
+
images, contrast_factor = node.input
|
| 1149 |
+
dtype = ctx.get_dtype(images)
|
| 1150 |
+
if ctx.get_dtype(contrast_factor) != dtype:
|
| 1151 |
+
contrast_factor = ctx.make_node("Cast", [dtype], attr={'to': dtype}).output[0]
|
| 1152 |
+
rank = ctx.get_rank(images)
|
| 1153 |
+
utils.make_sure(rank is not None, "AdjustContrastv2 requires input of known rank")
|
| 1154 |
+
# Reduce everything except channels
|
| 1155 |
+
axes_to_reduce = list(range(rank))[:-1]
|
| 1156 |
+
mean = ctx.make_node("ReduceMean", [images], attr={'axes': axes_to_reduce, 'keepdims': True},
|
| 1157 |
+
op_name_scope=node.name).output[0]
|
| 1158 |
+
diff = ctx.make_node("Sub", [images, mean], op_name_scope=node.name).output[0]
|
| 1159 |
+
scaled = ctx.make_node("Mul", [diff, contrast_factor], op_name_scope=node.name).output[0]
|
| 1160 |
+
result = ctx.make_node("Add", [scaled, mean], op_name_scope=node.name).output[0]
|
| 1161 |
+
ctx.replace_all_inputs(node.output[0], result)
|
| 1162 |
+
ctx.remove_node(node.name)
|
| 1163 |
+
|
| 1164 |
+
|
| 1165 |
+
@tf_op("AdjustSaturation")
|
| 1166 |
+
class AdjustSaturation:
|
| 1167 |
+
@classmethod
|
| 1168 |
+
def version_11(cls, ctx, node, **kwargs):
|
| 1169 |
+
images, factor = node.input
|
| 1170 |
+
dtype = ctx.get_dtype(images)
|
| 1171 |
+
np_dtype = utils.map_onnx_to_numpy_type(dtype)
|
| 1172 |
+
k = ctx.make_const(utils.make_name("three"), np.array([3], np.int64)).output[0]
|
| 1173 |
+
ordered, indices = ctx.make_node("TopK", [images, k], attr={'axis': -1}, output_count=2).output
|
| 1174 |
+
# Sorted and separated into channels
|
| 1175 |
+
max_c, mid_c, min_c = ctx.make_node("Split", [ordered], attr={'axis': -1}, output_count=3).output
|
| 1176 |
+
delta = ctx.make_node("Sub", [max_c, min_c]).output[0]
|
| 1177 |
+
scaled_delta = ctx.make_node("Mul", [delta, factor], op_name_scope=node.name).output[0]
|
| 1178 |
+
new_delta = ctx.make_node("Min", [scaled_delta, max_c]).output[0]
|
| 1179 |
+
new_min = ctx.make_node("Sub", [max_c, new_delta]).output[0]
|
| 1180 |
+
delta2 = ctx.make_node("Sub", [mid_c, min_c]).output[0]
|
| 1181 |
+
const_zero = ctx.make_const(utils.make_name("zero"), np.array(0, np_dtype)).output[0]
|
| 1182 |
+
delta_z = ctx.make_node("Equal", [delta, const_zero]).output[0]
|
| 1183 |
+
delta_z_cast = ctx.make_node("Cast", [delta_z], attr={'to': dtype}).output[0]
|
| 1184 |
+
delta_nz = ctx.make_node("Add", [delta, delta_z_cast]).output[0]
|
| 1185 |
+
delta2_scale = ctx.make_node("Div", [new_delta, delta_nz]).output[0]
|
| 1186 |
+
new_delta2 = ctx.make_node("Mul", [delta2, delta2_scale], op_name_scope=node.name).output[0]
|
| 1187 |
+
new_mid = ctx.make_node("Add", [new_min, new_delta2]).output[0]
|
| 1188 |
+
new_ordered = ctx.make_node("Concat", [max_c, new_mid, new_min], attr={'axis': -1}).output[0]
|
| 1189 |
+
# Now put it back in order
|
| 1190 |
+
result = ctx.make_node("GatherElements", [new_ordered, indices], attr={'axis': -1}).output[0]
|
| 1191 |
+
ctx.replace_all_inputs(node.output[0], result)
|
| 1192 |
+
ctx.remove_node(node.name)
|
| 1193 |
+
|
| 1194 |
+
|
| 1195 |
+
@tf_op("MatrixBandPart")
|
| 1196 |
+
class MatrixBandPart:
|
| 1197 |
+
@classmethod
def version_7(cls, ctx, node, **kwargs):
    """Opset-7 MatrixBandPart: build a triangular mask with a Loop, then Mul.

    Fix vs. original: two uses of ``np.bool`` (deprecated in NumPy 1.20,
    removed in 1.24) replaced with the builtin ``bool``.
    """
    # T output = MatrixBandPart(T input, int num_lower, int num_upper)
    # data-flow: first generate mask matrix and then use element-wise mul op
    input_rank = len(ctx.get_shape(node.input[0]))
    utils.make_sure(input_rank == 2, error_msg="MatrixBandPart op: only rank 2 is supported")
    bandpart = [node.inputs[ind].get_tensor_value() for ind in [1, 2]]
    utils.make_sure(bandpart in [[-1, 0], [0, -1]], "only support Lower/Upper triangular for opset < 11")
    # methods to generate mask matrix: if lower triangular is needed, then generate column one by one
    # otherwise row is generated one by one.
    axis, counter_axis, squeeze_axis = (1, 0, 2) if bandpart == [-1, 0] else (0, 1, 1)
    # 1: subgraph to implement tf.onelike(input[:, 0]),
    # no need to worry about the dtype, because bool type is needed as Xor only support bool
    node_name = utils.make_name("const_zero")
    const_zero = ctx.make_const(name=node_name, np_val=np.array([0]).astype(np.int32))
    first_col_or_row = ctx.make_node(op_type="Gather", inputs=[node.input[0], const_zero.output[0]],
                                     attr={"axis": axis})
    first_col_or_row_casted = ctx.make_node(op_type="Cast", inputs=first_col_or_row.output,
                                            attr={"to": onnx_pb.TensorProto.BOOL})
    # line means one col or one row
    # x Xor x == all-False; Not of that == all-True, i.e. a line of ones.
    zero_line = ctx.make_node(op_type="Xor", inputs=first_col_or_row_casted.output * 2)
    one_line = ctx.make_node(op_type="Not", inputs=zero_line.output)

    # 2: "loop" to generate mask matrix: generate col or row of matrix one by one
    g = ctx.create_new_graph_with_same_config()
    node_name = utils.make_name("const_zero_bool")
    # np.bool was removed in NumPy 1.24; the builtin bool is the documented replacement.
    const_zero_bool = g.make_const(name=node_name, np_val=np.array([[0]]).astype(bool))
    g.set_dtype(const_zero_bool.output[0], onnx_pb.TensorProto.BOOL)

    g.add_graph_input("trip", onnx_pb.TensorProto.INT64, [])
    g.add_graph_input("cond", onnx_pb.TensorProto.BOOL, [])
    g.add_graph_input("line", onnx_pb.TensorProto.BOOL, [-1, -1])

    # shift right the line and add zero at the left.
    new_line = g.make_node(op_type="Concat", inputs=[const_zero_bool.output[0], "line"],
                           attr={"axis": counter_axis},
                           dtypes=[onnx_pb.TensorProto.BOOL])
    attr = {"axes": [counter_axis], "starts": [0], "ends": [-1]}
    inputs_map = {"data": new_line.output[0], **attr}
    slice_node = GraphBuilder(g).make_slice(inputs_map)

    g.make_node("Identity", ["cond"], outputs=["cond_out"])
    g.make_node("Identity", ["line"], outputs=["res"])
    g.make_node("Identity", [slice_node], outputs=["line_out"])

    g.add_graph_output("cond_out", onnx_pb.TensorProto.BOOL, [])
    g.add_graph_output("line_out", onnx_pb.TensorProto.BOOL, [-1, -1])
    g.add_graph_output("res", onnx_pb.TensorProto.BOOL, [-1, -1])

    # initial value of body vars
    shape = ctx.make_node(op_type="Shape", inputs=[node.input[0]])  # dtype of result is int64
    node_name = utils.make_name("line_num_index")
    col_or_row_num_index = ctx.make_const(name=node_name, np_val=np.array(axis).astype(np.int32))
    line_num = ctx.make_node(op_type="Gather", inputs=[shape.output[0], col_or_row_num_index.output[0]])
    trip_cnt = line_num.output[0]
    node_name = utils.make_name("true")
    # np.bool -> bool (same removal as above).
    cond = ctx.make_const(name=node_name, np_val=np.array(1).astype(bool))
    col_init = one_line.output[0]

    branches = {"body": g}
    loop_node = ctx.make_node(op_type="Loop", inputs=[trip_cnt, cond.output[0], col_init],
                              output_count=2, branches=branches)
    # convert generated mask matrix from bool to right shape and data type
    squeeze = GraphBuilder(ctx).make_squeeze(
        {'data': loop_node.output[1], 'axes': [squeeze_axis]}, return_node=True)
    cast1 = ctx.make_node(op_type="Cast", inputs=squeeze.output, attr={"to": onnx_pb.TensorProto.FLOAT})
    if axis == 1:
        mask_matrix = ctx.make_node(op_type="Transpose", inputs=cast1.output)
    else:
        mask_matrix = squeeze
    cast2 = ctx.make_node(op_type="Cast", inputs=mask_matrix.output,
                          attr={"to": ctx.get_dtype(node.input[0])})
    shapes = node.output_shapes
    dtypes = node.output_dtypes
    ctx.remove_node(node.name)
    ctx.make_node(op_type="Mul", inputs=[cast2.output[0], node.input[0]],
                  name=node.name, outputs=node.output, shapes=shapes,
                  dtypes=dtypes)
|
| 1275 |
+
|
| 1276 |
+
@classmethod
def version_11(cls, ctx, node, **kwargs):
    """Convert tf.linalg.band_part (MatrixBandPart) using opset-11 ops.

    Builds a 0/1 band mask from flat-index arithmetic and multiplies it
    with the input, instead of the Loop-based construction used by the
    earlier opset version.
    """
    # num_lower/num_upper are graph inputs; read their values only when
    # they are constants so the conversion can be specialized.
    num_lower_const = node.inputs[1].get_tensor_value() if node.inputs[1].is_const() else None
    num_upper_const = node.inputs[2].get_tensor_value() if node.inputs[2].is_const() else None
    data, num_lower, num_upper = node.input
    rank = ctx.get_rank(data)
    int_max_val = utils.get_max_value(np.int64)
    dtype = ctx.get_dtype(data)
    if rank == 2:
        shape = ctx.make_node("Shape", [data]).output[0]
    else:
        # Band-part acts on the innermost two dims; slice them off the
        # full shape (int64 max in 'ends' means "to the end").
        whole_shape = ctx.make_node("Shape", [data]).output[0]
        shape = GraphBuilder(ctx).make_slice(
            {'data': whole_shape, 'starts': [-2], 'ends': [int_max_val], 'axes': [0]})
    if num_lower_const == 0 and num_upper_const == 0:
        # Special case: a band of width 0 keeps only the main diagonal,
        # which EyeLike expresses directly.
        if rank == 2:
            identity_node = ctx.make_node("EyeLike", [data]).output[0]
        else:
            # EyeLike needs a 2-D input; materialize a zero matrix of
            # the inner (rows, cols) shape first.
            zero_tensor = helper.make_tensor("value", dtype, dims=[1], vals=[0])
            const_of_shape = ctx.make_node("ConstantOfShape", [shape], attr={'value': zero_tensor}).output[0]
            identity_node = ctx.make_node("EyeLike", [const_of_shape]).output[0]
        shapes = node.output_shapes
        dtypes = node.output_dtypes
        ctx.remove_node(node.name)
        ctx.make_node(op_type="Mul", inputs=[identity_node, data],
                      name=node.name, outputs=node.output, shapes=shapes,
                      dtypes=dtypes)
        return
    zero_const = ctx.make_const(utils.make_name("zero"), np.array(0, np.int64)).output[0]
    one_const = ctx.make_const(utils.make_name("one"), np.array(1, np.int64)).output[0]
    conditions = []
    row_cnt = GraphBuilder(ctx).make_slice({'data': shape, 'axes': [0], 'starts': [0], 'ends': [1]})
    col_cnt = GraphBuilder(ctx).make_slice({'data': shape, 'axes': [0], 'starts': [1], 'ends': [2]})
    limit = ctx.make_node("Mul", [row_cnt, col_cnt]).output[0]
    # idx_cnt = ctx.make_node("Range", [zero_const, limit, one_const]).output[0]

    # Emulate Range with Expand + exclusive CumSum: ones expanded to
    # [limit], then a running (exclusive) sum yields 0..limit-1.
    ones_of_shape = ctx.make_node("Expand", [one_const, limit]).output[0]
    idx_cnt = ctx.make_node("CumSum", [ones_of_shape, zero_const], attr={'exclusive': True}).output[0]

    # Turn each flat index into (row, col) coordinates; col - row is the
    # signed distance from the main diagonal.
    idx_reshape = ctx.make_node("Reshape", [idx_cnt, shape]).output[0]
    row_idx = ctx.make_node("Div", [idx_reshape, col_cnt]).output[0]
    col_idx = ctx.make_node("Mod", [idx_reshape, col_cnt]).output[0]
    idx_diff = ctx.make_node("Sub", [col_idx, row_idx]).output[0]

    # A bound is applied when it is non-negative or unknown at
    # conversion time; a known-negative bound keeps the whole triangle.
    if num_upper_const is None or num_upper_const >= 0:
        if ctx.get_dtype(num_upper) != TensorProto.INT64:
            num_upper = ctx.make_node("Cast", [num_upper], attr={'to': TensorProto.INT64}).output[0]
        # keep elements with col - row <= num_upper
        greater = ctx.make_node("Greater", [idx_diff, num_upper]).output[0]
        less_or_equal = ctx.make_node("Not", [greater]).output[0]
        conditions.append(less_or_equal)
    if num_lower_const is None or num_lower_const >= 0:
        if ctx.get_dtype(num_lower) != TensorProto.INT64:
            num_lower = ctx.make_node("Cast", [num_lower], attr={'to': TensorProto.INT64}).output[0]
        # keep elements with -num_lower <= col - row
        num_lower_neg = ctx.make_node("Neg", [num_lower]).output[0]
        greater = ctx.make_node("Greater", [num_lower_neg, idx_diff]).output[0]
        less_or_equal = ctx.make_node("Not", [greater]).output[0]
        conditions.append(less_or_equal)
    if len(conditions) == 0:
        # Both bounds known-negative: nothing is masked, op is identity.
        node.type = "Identity"
        ctx.replace_inputs(node, [data])
        return
    if len(conditions) == 1:
        cond = conditions[0]
    if len(conditions) == 2:
        cond = ctx.make_node("And", conditions).output[0]
    # Cast the boolean band mask to the data dtype and apply it by Mul.
    mask = ctx.make_node("Cast", [cond], attr={'to': ctx.get_dtype(data)}).output[0]
    shapes = node.output_shapes
    dtypes = node.output_dtypes
    ctx.remove_node(node.name)
    ctx.make_node(op_type="Mul", inputs=[mask, data],
                  name=node.name, outputs=node.output, shapes=shapes,
                  dtypes=dtypes)
|
| 1348 |
+
|
| 1349 |
+
|
| 1350 |
+
def _make_softmax_cross_entropy_with_logits(ctx, label, logit, tf_ori_node):
    """Replace tf_ori_node with loss = -reduce_sum(label * log_softmax(logit), axis=-1).

    `label` and `logit` are graph nodes and must already share one dtype
    (enforced below). The original node is removed and its first output
    is rewired to the squeezed per-example loss.
    """
    label_dtype = ctx.get_dtype(label.output[0])
    logit_dtype = ctx.get_dtype(logit.output[0])
    utils.make_sure(label_dtype == logit_dtype, "the following logic only works on same dtype of label and logit")

    log_softmax = ctx.make_node(op_type="LogSoftmax", inputs=logit.output)
    # implement tf.multiply(-1, tf.reduce_sum(tf.multiply(label, log_softmax), axis=1))
    mul1 = ctx.make_node(op_type="Mul", inputs=[label.output[0], log_softmax.output[0]])
    reduce_sum_output = GraphBuilder(ctx).make_reduce_sum(
        {"data": mul1.output[0], "axes": [-1], "keepdims": 1, "noop_with_empty_axes": 1})
    # Negate by multiplying with a -1 constant of the logits' dtype.
    const_negative_one = ctx.make_const(name=utils.make_name("const_negative_one"),
                                        np_val=np.array(-1).astype(utils.ONNX_TO_NUMPY_DTYPE[logit_dtype]))
    mul2 = ctx.make_node(op_type="Mul", inputs=[const_negative_one.output[0], reduce_sum_output])
    shapes = tf_ori_node.output_shapes
    dtypes = tf_ori_node.output_dtypes
    ctx.remove_node(tf_ori_node.name)
    # Drop the kept reduction axis so the output shape matches TF's.
    GraphBuilder(ctx).make_squeeze({'axes': [1], 'data': mul2.output[0], 'outputs': [tf_ori_node.output[0]]},
                                   shapes=[shapes[0]], dtypes=[dtypes[0]])
|
| 1368 |
+
|
| 1369 |
+
|
| 1370 |
+
def sparse_softmax_cross_entropy_with_logits_op_by_gathernd(ctx, node, **kwargs):
    """Convert SparseSoftmaxCrossEntropyWithLogits via GatherND.

    Instead of materializing a one-hot matrix, pair every row index with
    its label and gather the matching log-softmax entries directly.
    Only rank-1 label tensors are supported.
    """
    # make subgraph to implement one_hot, idea comes from onehot_op
    indices_name = node.input[1]
    indices_shape = ctx.get_shape(indices_name)
    if len(indices_shape) != 1:
        # TODO: this works for rank=1 but tensorflow supports more than this.
        # Same principle should work but we need to implement our own eye.
        raise ValueError("onehot op: only rank1 is supported")
    logit_name = node.input[0]
    logit_dtype = ctx.get_dtype(logit_name)
    logit_shape = ctx.get_shape(logit_name)
    utils.make_sure(logit_dtype, "Dtype of {} is None".format(logit_name))
    indices_dtype = ctx.get_dtype(indices_name)
    # GatherND coordinate tensors must be int64.
    if indices_dtype != TensorProto.INT64:
        indices_cast = ctx.make_node("Cast", [indices_name], attr={"to": TensorProto.INT64})
        indices_name = indices_cast.output[0]
    indices_size = ctx.make_node("Size", [indices_name])
    gb = GraphBuilder(ctx)
    indices_unsqueeze = gb.make_unsqueeze({'data': indices_name, "axes": [1]}, return_node=True)
    zero_const = ctx.make_const(utils.make_name("zero"), np.array(0, dtype=np.int64))
    one_const = ctx.make_const(utils.make_name("one"), np.array(1, dtype=np.int64))
    # Build the row indices [0, 1, ..., batch-1] at runtime so each
    # label can be paired with its own row.
    id_name = utils.make_name("sparse_softmax_id")
    id_output = utils.port_name(id_name)
    controlflow.make_range(ctx, zero_const.output[0], indices_size.output[0], one_const.output[0],
                           id_output, id_name, shape=[-1], dtype=TensorProto.INT64)
    id_unsqueeze = gb.make_unsqueeze({'data': id_output, "axes": [1]}, return_node=True)
    # Shape [batch, 2]: (row, label) coordinate pairs for GatherND.
    indices_with_id = ctx.make_node("Concat",
                                    [id_unsqueeze.output[0], indices_unsqueeze.output[0]],
                                    attr={"axis": 1})
    log_softmax = ctx.make_node(op_type="LogSoftmax",
                                inputs=[logit_name], dtypes=[logit_dtype], shapes=[logit_shape])
    gathernd_name = utils.make_name("sparse_softmax_gathernd")
    gathernd_output = utils.port_name(gathernd_name)
    tensor.make_gathernd(ctx, log_softmax.output[0], indices_with_id.output[0], gathernd_output,
                         gathernd_name, logit_dtype, [logit_shape], [logit_dtype])
    # loss = -log_softmax[row, label]
    const_name = utils.make_name("const_negative_one")
    const_negative_one = ctx.make_const(const_name, np.array(-1).astype(utils.map_onnx_to_numpy_type(logit_dtype)))
    mul2 = ctx.make_node(op_type="Mul", inputs=[const_negative_one.output[0], gathernd_output])
    shapes = node.output_shapes
    dtypes = node.output_dtypes
    ctx.remove_node(node.name)
    gb = GraphBuilder(ctx)
    # Squeeze away the gathered axis and rewire the original output.
    gb.make_squeeze({'data': mul2.output[0], 'outputs': [node.output[0]], "axes": [1]},
                    shapes=[shapes[0]], dtypes=[dtypes[0]])
|
| 1414 |
+
|
| 1415 |
+
|
| 1416 |
+
@tf_op("SoftmaxCrossEntropyWithLogits")
class SoftmaxCrossEntropyWithLogits:
    """Convert tf.nn.softmax_cross_entropy_with_logits."""

    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        logit_node = node.inputs[0]
        label_node = node.inputs[1]
        logit_dtype = ctx.get_dtype(logit_node.output[0])
        # The shared helper insists on matching dtypes; cast the labels
        # over to the logits' dtype when they disagree.
        if ctx.get_dtype(label_node.output[0]) != logit_dtype:
            label_node = ctx.make_node("Cast", label_node.output,
                                       attr={"to": logit_dtype}, dtypes=[logit_dtype])
        _make_softmax_cross_entropy_with_logits(ctx, label_node, logit_node, node)
|
| 1428 |
+
|
| 1429 |
+
|
| 1430 |
+
def _make_sparse_softmax_cross_entropy_with_logits(ctx, label, logit, tf_ori_node):
    """Replace tf_ori_node with the sparse softmax cross-entropy loss.

    `label` is expected to be one-hot and to share the logits' dtype
    (enforced below). Computes -log(masked_sum / sum) in the exp domain,
    subtracting the per-row max first for numerical stability.
    """
    logit = logit.output[0]
    label = label.output[0]
    label_dtype = ctx.get_dtype(label)
    logit_dtype = ctx.get_dtype(logit)
    utils.make_sure(label_dtype == logit_dtype, "the following logic only works on same dtype of label and logit")

    # when label is onehot, logic "tf.multiply(-1, tf.reduce_sum(tf.multiply(label, log_softmax), axis=1))" is equal to
    # "-log(q_i)" where i is the selected index specified by label, q_i = logic_i/sum, the detail process is as follows:
    # logit_exp=exp(logit) >> sum = tf.reduce_sum(logit_exp, axis = -1), masked_sum = reduce_sum(mul(logit_exp, mul))
    # >> -log(masked_sum/sum)
    # Subtract the row-wise max before exponentiating to avoid overflow;
    # the shift cancels in the ratio below.
    logit_max = ctx.make_node(op_type="ReduceMax", inputs=[logit], attr={"axes": [-1], "keepdims": 1}).output[0]
    logit_norm = ctx.make_node(op_type="Sub", inputs=[logit, logit_max]).output[0]
    logit_exp = ctx.make_node(op_type="Exp", inputs=[logit_norm]).output[0]
    logit_exp_sum = GraphBuilder(ctx).make_reduce_sum(
        {"data": logit_exp, "axes": [-1], "keepdims": 0, "noop_with_empty_axes": 1})
    # The one-hot mask keeps only the selected class's exp value per row.
    masked = ctx.make_node(op_type="Mul", inputs=[label, logit_exp]).output[0]
    masked_sum = GraphBuilder(ctx).make_reduce_sum(
        {"data": masked, "axes": [-1], "keepdims": 0, "noop_with_empty_axes": 1})
    probability = ctx.make_node(op_type="Div", inputs=[masked_sum, logit_exp_sum]).output[0]
    log_prob = ctx.make_node(op_type="Log", inputs=[probability]).output[0]
    const_negative_one = ctx.make_const(name=utils.make_name("const_negative_one"),
                                        np_val=np.array(-1).astype(utils.ONNX_TO_NUMPY_DTYPE[logit_dtype])).output[0]

    shapes = tf_ori_node.output_shapes
    dtypes = tf_ori_node.output_dtypes
    ctx.remove_node(tf_ori_node.name)
    # Final negation reuses the original node's output name.
    ctx.make_node(op_type="Mul", inputs=[log_prob, const_negative_one],
                  outputs=[tf_ori_node.output[0]], shapes=[shapes[0]], dtypes=[dtypes[0]])
|
| 1459 |
+
|
| 1460 |
+
|
| 1461 |
+
@tf_op("SparseSoftmaxCrossEntropyWithLogits")
class SparseSoftmaxCrossEntropyWithLogits:
    """Convert tf.nn.sparse_softmax_cross_entropy_with_logits."""

    @classmethod
    def version_7(cls, ctx, node, **kwargs):
        # make subgraph to implement one_hot, idea comes from onehot_op
        indices_name = node.input[1]
        indices_shape = ctx.get_shape(indices_name)
        if len(indices_shape) != 1:
            # TODO: this works for rank=1 but tensorflow supports more than this.
            # Same principle should work but we need to implement our own eye.
            raise ValueError("onehot op: only rank1 is supported")
        logit_name = node.input[0]
        depth = ctx.get_shape(logit_name)[-1]
        # if number of classes is unknown or too large
        # (the constant eye matrix below would hold depth**2 entries)
        if depth == utils.ONNX_UNKNOWN_DIMENSION or depth > 20000:
            sparse_softmax_cross_entropy_with_logits_op_by_gathernd(ctx, node, **kwargs)
            return
        logit_dtype = ctx.get_dtype(logit_name)
        utils.make_sure(logit_dtype, "Dtype of {} is None".format(logit_name))

        # One-hot encode the labels by gathering rows of a constant
        # identity matrix, then use the dense cross-entropy formulation.
        dtype = utils.map_onnx_to_numpy_type(logit_dtype)
        eye = np.eye(depth).astype(dtype)
        const_name = utils.make_name("const_eye")
        const_eye = ctx.make_const(name=const_name, np_val=eye)
        onehot = ctx.make_node(op_type="Gather", inputs=[const_eye.output[0], indices_name], attr={"axis": 0})
        log_softmax = ctx.make_node(op_type="LogSoftmax", inputs=[logit_name])
        # implement tf.multiply(np.float32(-1.0), tf.reduce_sum(tf.multiply(one_hot, log_softmax), axis=1))
        mul1 = ctx.make_node(op_type="Mul", inputs=[onehot.output[0], log_softmax.output[0]])
        reduce_sum_output = GraphBuilder(ctx).make_reduce_sum(
            {"data": mul1.output[0], "axes": [1], "keepdims": 1, "noop_with_empty_axes": 1})
        const_name = utils.make_name("const_negative_one")
        const_negative_one = ctx.make_const(name=const_name, np_val=np.array(-1).astype(dtype))
        mul2 = ctx.make_node(op_type="Mul", inputs=[const_negative_one.output[0], reduce_sum_output])

        shapes = node.output_shapes
        dtypes = node.output_dtypes
        ctx.remove_node(node.name)
        # Squeeze away the kept reduction axis onto the original output.
        ctx.make_node(op_type="Squeeze", inputs=[mul2.output[0]], outputs=[node.output[0]], attr={"axes": [1]},
                      shapes=[shapes[0]], dtypes=[dtypes[0]])

    @classmethod
    def version_9(cls, ctx, node, **kwargs):
        # float32/64 output = SparseSoftmaxCrossEntropyWithLogits(float32/64 features, int32/64 labels)
        # the detail math process of this op is: a = onehot(labels), b = logsoftmax(features), reduce_sum(mul(a, b))
        logit_node = node.inputs[0]
        logit_shape = ctx.get_shape(node.input[0])
        logit_dtype = ctx.get_dtype(node.input[0])

        label_name = node.input[1]

        # Depth of the one-hot axis: take it statically when the last
        # logit dim is known, otherwise slice it out of the runtime shape.
        if logit_shape is not None and logit_shape[-1] != -1:
            num_class = logit_shape[-1]
            node_nme = utils.make_name("onehot_depth")
            depth_node = ctx.make_const(node_nme, np.array([num_class]).astype(np.int64)).output[0]
        else:
            logit_shape = ctx.make_node("Shape", [node.input[0]]).output[0]
            slice_args = {"data": logit_shape,
                          "starts": [-1], "ends": [int(utils.get_max_value(np.int32))]}
            num_class = GraphBuilder(ctx).make_slice(kwargs=slice_args)
            depth_node = num_class
        # off/on values [0, 1] for OneHot.
        values_node = ctx.make_const(utils.make_name("onehot_values"), np.array([0, 1]).astype(np.int64)).output[0]
        # OneHot indices must be int64.
        label_dtype = ctx.get_dtype(label_name)
        if label_dtype != TensorProto.INT64:
            onehot_indice = ctx.make_node("Cast", [label_name], attr={"to": TensorProto.INT64}).output[0]
        else:
            onehot_indice = label_name
        label_node = ctx.make_node(op_type="OneHot",
                                   inputs=[onehot_indice, depth_node, values_node])
        # the above logic makes output dtype of label_node now always int64
        # make sure label has same dtype as logit
        if logit_dtype != TensorProto.INT64:
            label_node = ctx.make_node("Cast", label_node.output, attr={"to": logit_dtype}, dtypes=[logit_dtype])

        _make_sparse_softmax_cross_entropy_with_logits(ctx, label_node, logit_node, node)
|
lib/python3.10/site-packages/tf2onnx/onnx_opset/quantize.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tensor
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
from __future__ import unicode_literals
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
|
| 14 |
+
import numpy as np
|
| 15 |
+
from onnx.onnx_pb import TensorProto
|
| 16 |
+
|
| 17 |
+
from tf2onnx import utils
|
| 18 |
+
from tf2onnx.handler import tf_op
|
| 19 |
+
from tf2onnx.utils import make_sure
|
| 20 |
+
|
| 21 |
+
logger = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# pylint: disable=unused-argument,missing-docstring,unused-variable,pointless-string-statement,invalid-name
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@tf_op(["FakeQuantWithMinMaxArgs", "FakeQuantWithMinMaxVars"])
class FakeQuantWithMinMaxArgs:
    """Convert TF fake-quantization ops to QuantizeLinear/DequantizeLinear pairs."""
    # see https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/fake-quant-with-min-max-args
    @classmethod
    def version_10(cls, ctx, node, **kwargs):
        # hack to make up for the missing onnx pack op
        # FakeQuantWithMinMaxVars takes min/max as inputs; they must be
        # constant scalars so we can fold them at conversion time.
        if node.type == "FakeQuantWithMinMaxVars":
            utils.make_sure(node.inputs[1].is_scalar(), "%s node %s requires const scalar value for min",
                            node.type, node.name)
            utils.make_sure(node.inputs[2].is_scalar(), "%s node %s requires const scalar value for max",
                            node.type, node.name)
            amin = node.inputs[1].get_tensor_value()
            amax = node.inputs[2].get_tensor_value()
        else:
            # FakeQuantWithMinMaxArgs carries min/max as attributes.
            amin = node.get_attr("min").f
            amax = node.get_attr("max").f
        narrow_range = node.get_attr("narrow_range").i
        num_bits = node.get_attr("num_bits").i

        # Only the default TF scheme (full range, 8 bits) is handled.
        make_sure(
            not narrow_range,
            "Unable to convert node FakeQuantWithMinMaxArgs with narrow_range=%r",
            narrow_range)
        make_sure(
            num_bits == 8,
            "Unable to convert node FakeQuantWithMinMaxArgs with "
            "num_bits=%r", num_bits)

        # Affine quantization parameters: scale maps [amin, amax] onto
        # 2**num_bits - 1 steps; min_adj is amin expressed in steps.
        scale = (amax - amin) / (2 ** num_bits - 1)
        min_adj = np.around(amin / scale)

        dtype = ctx.get_dtype(node.input[0])
        shape = ctx.get_shape(node.input[0])
        axis = 1
        idtype = TensorProto.UINT8

        pb_scale = ctx.make_const(
            utils.make_name("{}_scaley".format(node.name)),
            np.array(scale, dtype=np.float32))
        zero = np.array(-min_adj, dtype=np.uint8)
        # The uint8 cast wraps on overflow; comparing the round-trip
        # detects a zero point that falls outside [0, 255].
        make_sure(
            zero == -min_adj,
            "Cannot convert %s node %s with "
            "min=%r max=%r numbits=%r because zero_scale=%r "
            "is outside uint8 boundary",
            node.type, node.name, amin, amax, num_bits, -min_adj)
        zero_point = ctx.make_const(
            utils.make_name("{}_zpy".format(node.name)), zero)

        new_node = ctx.make_node(
            "QuantizeLinear", [node.input[0], pb_scale.name, zero_point.name],
            op_name_scope=node.name, attr={"axis": axis},
            shapes=[shape], dtypes=[idtype])
        output_name = new_node.output[0]
        ctx.replace_input(node, node.input[0], output_name, 0)

        ctx.remove_node(node.name)

        # Dequantize right away: the Quantize/Dequantize pair models
        # fake quantization (rounding error applied, float dtype kept).
        last_node = ctx.make_node(
            "DequantizeLinear", [new_node.output[0], pb_scale.name, zero_point.name],
            op_name_scope=node.name, attr={"axis": axis},
            shapes=[shape], dtypes=[dtype])
        ctx.replace_all_inputs(node.output[0], last_node.output[0])  # ops=ctx.get_nodes()
|
lib/python3.10/site-packages/tf2onnx/onnx_opset/reduction.py
ADDED
|
@@ -0,0 +1,316 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
reduction
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
from __future__ import unicode_literals
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
|
| 14 |
+
import numpy as np
|
| 15 |
+
from onnx import onnx_pb, helper
|
| 16 |
+
|
| 17 |
+
from tf2onnx import utils
|
| 18 |
+
from tf2onnx.handler import tf_op
|
| 19 |
+
from tf2onnx.graph_builder import GraphBuilder
|
| 20 |
+
|
| 21 |
+
logger = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# pylint: disable=unused-argument,missing-docstring
|
| 25 |
+
|
| 26 |
+
@tf_op("Min", onnx_op="ReduceMin")
@tf_op("Max", onnx_op="ReduceMax")
@tf_op("Mean", onnx_op="ReduceMean")
@tf_op("Sum", onnx_op="ReduceSum")
@tf_op("Prod", onnx_op="ReduceProd")
class ReduceOpBase:
    """Map TF reductions onto the corresponding ONNX Reduce* ops."""

    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # TF passes the reduce axes as a second input; pre-opset-13 ONNX
        # wants them as an attribute, so fold the constant input in.
        axes_node = node.inputs[1]
        axes = axes_node.get_tensor_value()
        if np.isscalar(axes):
            axes = [axes]
        input_shape = ctx.get_shape(node.input[0])
        if input_shape is None:
            if any([val < 0 for val in axes]) and ctx.opset < 11:
                raise ValueError("reduce_op: cannot have negative axis if opset < 11 because we don't know input rank")
        else:
            # Normalize negative axes against the known input rank.
            input_rank = len(ctx.get_shape(node.input[0]))
            axes = [val + input_rank if val < 0 else val for val in axes]

        node.set_attr("axes", axes)
        ctx.remove_input(node, node.input[1], 1)
        # Rename TF's keep_dims attribute to ONNX's keepdims.
        keep_dims = node.get_attr_value("keep_dims", 0)
        node.set_attr("keepdims", keep_dims)
        del node.attr['keep_dims']

    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # Opset 11 supports negative axis, but core logic is same
        cls.version_1(ctx, node, **kwargs)

    @classmethod
    def version_13(cls, ctx, node, **kwargs):
        # From opset 13 ReduceSum takes axes as an input rather than an
        # attribute; the other reductions keep the attribute form.
        if node.type == "ReduceSum":
            keep_dims = node.get_attr_value("keep_dims", 0)
            node.set_attr("keepdims", keep_dims)
            del node.attr['keep_dims']
            node.set_attr("noop_with_empty_axes", 1)
            # Axes input must be int64.
            if ctx.get_dtype(node.input[1]) != onnx_pb.TensorProto.INT64:
                ctx.insert_new_node_on_input(node, "Cast", node.input[1], to=onnx_pb.TensorProto.INT64)
            # Axes input must be 1-D; flatten it when its rank is not
            # known to be 1.
            input_shape = ctx.get_shape(node.input[1])
            input_rank = len(input_shape) if input_shape is not None else None
            if input_rank != 1:
                new_shape = ctx.make_const(utils.make_name("reshape_const"), np.array([-1], np.int64))
                ctx.insert_new_node_on_input(node, "Reshape", [node.input[1], new_shape.output[0]])
        else:
            cls.version_11(ctx, node, **kwargs)
|
| 73 |
+
|
| 74 |
+
@tf_op(["ArgMax", "ArgMin"])
class ArgMax:
    """Map TF ArgMax/ArgMin to ONNX ArgMax/ArgMin."""

    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        # output_type output = ArgMin(T input, Tidx dimension, @type Tidx, @type output_type)
        # tensor(int32) reduced = ArgMin(T data, @INT axis, @INT keepdims)
        axis_node = node.inputs[1]
        axis = axis_node.get_tensor_value()
        if axis < 0:
            # ArgMax|ArgMin in onnx don't necessary support negative axis(not in doc explicitly)
            # NOTE(review): if input_shape is None/empty, dim_count is 0
            # and the axis stays negative — confirm callers guarantee a
            # known shape here.
            input_shape = ctx.get_shape(node.input[0])
            dim_count = len(input_shape) if input_shape else 0
            axis = dim_count + axis

        # TF ArgMin/ArgMax may return int32 or int64
        # Onnx ArgMin/ArgMax only supports int64 output, add cast if needed
        if node.get_attr_int("output_type") == onnx_pb.TensorProto.INT32:
            # current node will return int64 after conversion, which differs from previous dtype got from tf
            ctx.set_dtype(node.output[0], onnx_pb.TensorProto.INT64)
            op_name = utils.make_name("Cast")
            cast_node = ctx.insert_new_node_on_output("Cast", node.output[0], name=op_name,
                                                      to=onnx_pb.TensorProto.INT32)
            ctx.set_dtype(cast_node.output[0], onnx_pb.TensorProto.INT32)
            ctx.copy_shape(node.output[0], cast_node.output[0])

        # Move the axis input into an attribute; TF never keeps the
        # reduced dim, so keepdims is forced to 0.
        node.set_attr("axis", axis)
        node.set_attr("keepdims", 0)
        ctx.remove_input(node, node.input[1], 1)

    @classmethod
    def version_11(cls, ctx, node, **kwargs):
        # Opset 11 supports negative axis, but core logic same
        cls.version_1(ctx, node, **kwargs)

    @classmethod
    def version_12(cls, ctx, node, **kwargs):
        # Opset 12 adds extra attribute 'select_last_index'
        # No changes needed
        cls.version_1(ctx, node, **kwargs)
|
| 113 |
+
|
| 114 |
+
@tf_op(["All", "Any"])
class AllAny:
    """Convert tf.reduce_all / tf.reduce_any."""

    @classmethod
    def version_6(cls, ctx, node, **kwargs):
        # T output = All(T x, list(int) reduce_indices, @bool keepdims)
        # T output = Any(T x, list(int) reduce_indices, @bool keepdims)
        axes = node.inputs[1].get_tensor_value()

        # for Any, the reduce_indices can be scalar as observed.
        if np.isscalar(axes):
            axes = [axes]

        if ctx.opset < 11:
            utils.make_sure(all(i >= 0 for i in axes), "negative reduce axis is not supported in onnx for now")

        # Reduce in float space: min > 0 realizes "all", sum > 0 "any".
        as_float = ctx.make_node(op_type="Cast", inputs=[node.input[0]], attr={"to": onnx_pb.TensorProto.FLOAT})
        keepdims = helper.get_attribute_value(node.get_attr("keep_dims"))

        if node.type == "All":
            reduced = ctx.make_node(op_type="ReduceMin", inputs=as_float.output,
                                    attr={"axes": axes, "keepdims": keepdims}).output[0]
        else:
            reduced = GraphBuilder(ctx).make_reduce_sum(
                {"data": as_float.output[0], "axes": axes, "keepdims": keepdims, "noop_with_empty_axes": 1})

        zero_node = ctx.make_const(utils.make_name("zero_reduce"), np.array(0, dtype=np.float32))

        shapes = node.output_shapes
        dtypes = node.output_dtypes
        ctx.remove_node(node.name)
        # Compare against zero to get back a boolean result on the
        # original output name.
        ctx.make_node(op_type="Greater", inputs=[reduced, zero_node.output[0]],
                      name=node.name, outputs=node.output, shapes=shapes, dtypes=dtypes)
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
@tf_op("AddN")
class AddN():
    """tf.add_n maps directly to ONNX's variadic Sum op."""
    @classmethod
    def version_6(cls, ctx, node, **kwargs):
        # Inputs/outputs line up one-to-one; only the op type changes.
        node.type = "Sum"
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
@tf_op(["SegmentSum", "SegmentProd", "SegmentMax", "SegmentMin", "SegmentMean",
|
| 157 |
+
"SparseSegmentSum", "SparseSegmentMean", "SparseSegmentSqrtN",
|
| 158 |
+
"SparseSegmentSumWithNumSegments", "SparseSegmentMeanWithNumSegments", "SparseSegmentSqrtNWithNumSegments",
|
| 159 |
+
"UnsortedSegmentSum", "UnsortedSegmentProd", "UnsortedSegmentMax", "UnsortedSegmentMin"])
|
| 160 |
+
class SegmentSum():
|
| 161 |
+
@classmethod
|
| 162 |
+
def any_version(cls, opset, ctx, node, **kwargs):
|
| 163 |
+
node_inputs = node.input
|
| 164 |
+
num_segments_specified = False
|
| 165 |
+
if node.type.endswith("WithNumSegments") or node.type.startswith("Unsorted"):
|
| 166 |
+
num_segments_specified = True
|
| 167 |
+
num_segments = node_inputs.pop()
|
| 168 |
+
node.type = node.type.replace("WithNumSegments", "")
|
| 169 |
+
node.type = node.type.replace("Unsorted", "")
|
| 170 |
+
if node.type.startswith("Sparse"):
|
| 171 |
+
data_inp, indices_inp, segment_inp = node_inputs
|
| 172 |
+
gather_node = ctx.make_node("Gather", [data_inp, indices_inp], attr={'axis': 0})
|
| 173 |
+
data_inp = gather_node.output[0]
|
| 174 |
+
node.type = node.type.replace("Sparse", "")
|
| 175 |
+
else:
|
| 176 |
+
data_inp, segment_inp = node_inputs
|
| 177 |
+
|
| 178 |
+
# Data has shape [n, a, b, ..., c]
|
| 179 |
+
data_shape = ctx.get_shape(data_inp)
|
| 180 |
+
data_rank = len(data_shape) if data_shape is not None else None
|
| 181 |
+
data_dtype = ctx.get_dtype(data_inp)
|
| 182 |
+
seg_rank = ctx.get_rank(segment_inp)
|
| 183 |
+
utils.make_sure(seg_rank == 1, "Segment ops only supported for segments of rank 1, not %s", seg_rank)
|
| 184 |
+
data_np_dtype = utils.map_onnx_to_numpy_type(data_dtype)
|
| 185 |
+
seg_np_dtype = utils.map_onnx_to_numpy_type(ctx.get_dtype(segment_inp))
|
| 186 |
+
|
| 187 |
+
if num_segments_specified and ctx.get_dtype(segment_inp) != ctx.get_dtype(num_segments):
|
| 188 |
+
num_segments = ctx.make_node("Cast", [num_segments], attr={"to": ctx.get_dtype(segment_inp)}).output[0]
|
| 189 |
+
|
| 190 |
+
data_is_float = np.dtype(data_np_dtype).kind == 'f'
|
| 191 |
+
data_is_int = np.dtype(data_np_dtype).kind == 'i'
|
| 192 |
+
utils.make_sure(data_is_float or data_is_int, "dtype for Segment ops must be float or int")
|
| 193 |
+
|
| 194 |
+
if node.type in ["SegmentSum", "SegmentMean", "SegmentSqrtN"]:
|
| 195 |
+
onnx_op = "ReduceSum"
|
| 196 |
+
identity_value = np.array(0, dtype=data_np_dtype)
|
| 197 |
+
elif node.type == "SegmentProd":
|
| 198 |
+
onnx_op = "ReduceProd"
|
| 199 |
+
identity_value = np.array(1, dtype=data_np_dtype)
|
| 200 |
+
elif node.type == "SegmentMax":
|
| 201 |
+
onnx_op = "ReduceMax"
|
| 202 |
+
if data_is_float:
|
| 203 |
+
identity_value = np.array('-inf', dtype=data_np_dtype)
|
| 204 |
+
else:
|
| 205 |
+
identity_value = np.iinfo(data_np_dtype).min
|
| 206 |
+
elif node.type == "SegmentMin":
|
| 207 |
+
onnx_op = "ReduceMin"
|
| 208 |
+
if data_is_float:
|
| 209 |
+
identity_value = np.array('inf', dtype=data_np_dtype)
|
| 210 |
+
else:
|
| 211 |
+
identity_value = np.iinfo(data_np_dtype).max
|
| 212 |
+
|
| 213 |
+
if not num_segments_specified:
|
| 214 |
+
max_segment = ctx.make_node("ReduceMax", [segment_inp], attr={'axes': [0], 'keepdims': 0})
|
| 215 |
+
one_const = ctx.make_const(utils.make_name("const_one"), np.array(1, dtype=seg_np_dtype))
|
| 216 |
+
num_segments = ctx.make_node("Add", [max_segment.output[0], one_const.output[0]]).output[0]
|
| 217 |
+
# ORT doesn't support bool for OneHot so we use float32 and cast to bool
|
| 218 |
+
onehot_values = ctx.make_const(utils.make_name("onehot_values"), np.array([0, 1], dtype=np.float32))
|
| 219 |
+
# one_hot_node has shape [s, n] (s is # segments)
|
| 220 |
+
one_hot_node = ctx.make_node("OneHot", [segment_inp, num_segments, onehot_values.output[0]],
|
| 221 |
+
attr={'axis': 0})
|
| 222 |
+
if node.type == "SegmentMean":
|
| 223 |
+
scaling_node_output = GraphBuilder(ctx).make_reduce_sum(
|
| 224 |
+
{"data": one_hot_node.output[0], "axes": [1], "keepdims": 0, "noop_with_empty_axes": 1})
|
| 225 |
+
elif node.type == "SegmentSqrtN":
|
| 226 |
+
seg_cnts_node_output = GraphBuilder(ctx).make_reduce_sum(
|
| 227 |
+
{"data": one_hot_node.output[0], "axes": [1], "keepdims": 0, "noop_with_empty_axes": 1})
|
| 228 |
+
scaling_node_output = ctx.make_node("Sqrt", [seg_cnts_node_output]).output[0]
|
| 229 |
+
else:
|
| 230 |
+
scaling_node_output = None
|
| 231 |
+
|
| 232 |
+
if scaling_node_output is not None and num_segments_specified:
|
| 233 |
+
# If empty segments are possible, we must avoid division by zero
|
| 234 |
+
const_one_float = ctx.make_const(utils.make_name("const_one_float"), np.array(1, dtype=np.float32))
|
| 235 |
+
scaling_node_output = ctx.make_node("Max", [scaling_node_output, const_one_float.output[0]]).output[0]
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
if onnx_op == "ReduceSum":
|
| 239 |
+
# If the op is a summation, we can use MatMul instead of Where, which is faster
|
| 240 |
+
|
| 241 |
+
# Data shape is [n, a, b, ..., c]
|
| 242 |
+
data_shape_node = ctx.make_node("Shape", [data_inp])
|
| 243 |
+
new_shape = ctx.make_const(utils.make_name("reshape_const"), np.array([0, -1], dtype=np.int64))
|
| 244 |
+
# Reshape the data from [n, a, b, ..., c] to [n, P]
|
| 245 |
+
data_reshape = ctx.make_node("Reshape", [data_inp, new_shape.output[0]])
|
| 246 |
+
|
| 247 |
+
one_hot_cast = one_hot_node
|
| 248 |
+
if data_dtype != onnx_pb.TensorProto.FLOAT:
|
| 249 |
+
one_hot_cast = ctx.make_node("Cast", [one_hot_node.output[0]], attr={'to': data_dtype})
|
| 250 |
+
|
| 251 |
+
# Shapes [s, n] * [n, P] => [s, P]
|
| 252 |
+
product = ctx.make_node("MatMul", [one_hot_cast.output[0], data_reshape.output[0]], op_name_scope=node.name)
|
| 253 |
+
if scaling_node_output is not None:
|
| 254 |
+
scaling_node_unsqueeze = GraphBuilder(ctx).make_unsqueeze(
|
| 255 |
+
{'data': scaling_node_output, 'axes': [1]}, return_node=True)
|
| 256 |
+
product = ctx.make_node("Div", [product.output[0], scaling_node_unsqueeze.output[0]])
|
| 257 |
+
|
| 258 |
+
# Create new shape [0, a, b, ..., c]
|
| 259 |
+
max_int64 = int(utils.get_max_value(np.int64))
|
| 260 |
+
new_shape_slice = GraphBuilder(ctx).make_slice(
|
| 261 |
+
{"data": data_shape_node.output[0], "ends": [max_int64], "starts": [1], "axes": [0]})
|
| 262 |
+
zero_const = ctx.make_const(utils.make_name("zero_const"), np.array([0], dtype=np.int64))
|
| 263 |
+
new_shape = ctx.make_node("Concat", [zero_const.output[0], new_shape_slice], attr={'axis': 0})
|
| 264 |
+
|
| 265 |
+
shapes = node.output_shapes
|
| 266 |
+
dtypes = node.output_dtypes
|
| 267 |
+
ctx.remove_node(node.name)
|
| 268 |
+
# Reshape result from [s, P] to [s, a, b, ..., c]
|
| 269 |
+
ctx.make_node("Reshape", [product.output[0], new_shape.output[0]],
|
| 270 |
+
name=node.name, outputs=node.output, shapes=shapes, dtypes=dtypes)
|
| 271 |
+
return
|
| 272 |
+
|
| 273 |
+
identity_const = ctx.make_const(utils.make_name("const_identity"), identity_value)
|
| 274 |
+
one_hot_bool = ctx.make_node("Cast", [one_hot_node.output[0]], attr={"to": onnx_pb.TensorProto.BOOL})
|
| 275 |
+
one_hot_unsqueeze = one_hot_bool
|
| 276 |
+
|
| 277 |
+
# Make one_hot_unsqueeze have shape [s, n, 1, 1, ..., 1]
|
| 278 |
+
if data_rank is None:
|
| 279 |
+
# Unsqueeze requires known rank, but we can use Reshape if rank is unknown
|
| 280 |
+
shape_node = ctx.make_node("Shape", [data_inp])
|
| 281 |
+
rank_node = ctx.make_node("Shape", [shape_node.output[0]])
|
| 282 |
+
one_const_int64 = ctx.make_const(utils.make_name("const_one"), np.array([1], dtype=np.int64))
|
| 283 |
+
num_unsqueeze_dims = ctx.make_node("Sub", [rank_node.output[0], one_const_int64.output[0]])
|
| 284 |
+
|
| 285 |
+
one_tensor = helper.make_tensor("value", onnx_pb.TensorProto.INT64, dims=[1], vals=[1])
|
| 286 |
+
unsqueeze_dims = ctx.make_node("ConstantOfShape", inputs=[num_unsqueeze_dims.output[0]],
|
| 287 |
+
attr={"value": one_tensor})
|
| 288 |
+
# Zero indicates a dimension should be unchanged
|
| 289 |
+
double_zero_const = ctx.make_const(utils.make_name("double_zero"), np.array([0, 0], dtype=np.int64))
|
| 290 |
+
expanded_shape = ctx.make_node("Concat", [double_zero_const.output[0], unsqueeze_dims.output[0]],
|
| 291 |
+
attr={'axis': 0})
|
| 292 |
+
one_hot_unsqueeze = ctx.make_node("Reshape", [one_hot_bool.output[0], expanded_shape.output[0]])
|
| 293 |
+
elif data_rank > 1:
|
| 294 |
+
new_dims = list(range(2, 2 + data_rank - 1))
|
| 295 |
+
one_hot_unsqueeze = GraphBuilder(ctx).make_unsqueeze(
|
| 296 |
+
{'data': one_hot_bool.output[0], 'axes': new_dims}, return_node=True)
|
| 297 |
+
|
| 298 |
+
# Shape of data: [n, a, b, ..., c]
|
| 299 |
+
# Shape of one_hot: [s, n, 1, 1, ..., 1]
|
| 300 |
+
# Broadcast left-pads shape with 1s, so result is shape: [s, n, a, b, ..., c]
|
| 301 |
+
where_node = ctx.make_node("Where", [one_hot_unsqueeze.output[0], data_inp, identity_const.output[0]])
|
| 302 |
+
|
| 303 |
+
shapes = node.output_shapes
|
| 304 |
+
dtypes = node.output_dtypes
|
| 305 |
+
ctx.remove_node(node.name)
|
| 306 |
+
# After reduction over axis 1, shape is: [s, a, b, ..., c]
|
| 307 |
+
ctx.make_node(onnx_op, [where_node.output[0]], attr={'axes': [1], 'keepdims': 0},
|
| 308 |
+
name=node.name, outputs=node.output, shapes=shapes, dtypes=dtypes)
|
| 309 |
+
|
| 310 |
+
@classmethod
|
| 311 |
+
def version_9(cls, ctx, node, **kwargs):
|
| 312 |
+
cls.any_version(9, ctx, node, **kwargs)
|
| 313 |
+
|
| 314 |
+
@classmethod
|
| 315 |
+
def version_13(cls, ctx, node, **kwargs):
|
| 316 |
+
cls.any_version(13, ctx, node, **kwargs)
|
lib/python3.10/site-packages/tf2onnx/onnx_opset/rnn.py
ADDED
|
@@ -0,0 +1,263 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
rnn
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
from __future__ import unicode_literals
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
import numpy as np
|
| 14 |
+
from tf2onnx import utils
|
| 15 |
+
from tf2onnx.handler import tf_op
|
| 16 |
+
from tf2onnx.graph_builder import GraphBuilder
|
| 17 |
+
|
| 18 |
+
logger = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
# pylint: disable=unused-argument,missing-docstring
|
| 22 |
+
|
| 23 |
+
@tf_op("LSTMBlockCell")
|
| 24 |
+
class LSTMBlockCell:
|
| 25 |
+
@classmethod
|
| 26 |
+
def version_1(cls, ctx, node, **kwargs):
|
| 27 |
+
"""
|
| 28 |
+
Args:
|
| 29 |
+
x: A `Tensor`. Must be one of the following types: `float32`.
|
| 30 |
+
The input to the LSTM cell, shape (batch_size, num_inputs).
|
| 31 |
+
cs_prev: A `Tensor`. Must have the same type as `x`.
|
| 32 |
+
Value of the cell state at previous time step.
|
| 33 |
+
h_prev: A `Tensor`. Must have the same type as `x`.
|
| 34 |
+
Output of the previous cell at previous time step.
|
| 35 |
+
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
|
| 36 |
+
wci: A `Tensor`. Must have the same type as `x`.
|
| 37 |
+
The weight matrix for input gate peephole connection.
|
| 38 |
+
wcf: A `Tensor`. Must have the same type as `x`.
|
| 39 |
+
The weight matrix for forget gate peephole connection.
|
| 40 |
+
wco: A `Tensor`. Must have the same type as `x`.
|
| 41 |
+
The weight matrix for output gate peephole connection.
|
| 42 |
+
b: A `Tensor`. Must have the same type as `x`. The bias vector.
|
| 43 |
+
forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
|
| 44 |
+
cell_clip: An optional `float`. Defaults to `-1` (no clipping).
|
| 45 |
+
Value to clip the 'cs' value to. Disable by setting to negative value.
|
| 46 |
+
use_peephole: An optional `bool`. Defaults to `False`.
|
| 47 |
+
Whether to use peephole weights.
|
| 48 |
+
name: A name for the operation (optional).
|
| 49 |
+
Returns:
|
| 50 |
+
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
|
| 51 |
+
i: A `Tensor`. Has the same type as `x`. The input gate.
|
| 52 |
+
cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.
|
| 53 |
+
f: A `Tensor`. Has the same type as `x`. The forget gate.
|
| 54 |
+
o: A `Tensor`. Has the same type as `x`. The output gate.
|
| 55 |
+
ci: A `Tensor`. Has the same type as `x`. The cell input.
|
| 56 |
+
co: A `Tensor`. Has the same type as `x`. The cell after the tanh.
|
| 57 |
+
h: A `Tensor`. Has the same type as `x`. The output h vector.
|
| 58 |
+
```python
|
| 59 |
+
xh = [x, h_prev]
|
| 60 |
+
[i, ci, f, o] = xh * w + b
|
| 61 |
+
f = f + forget_bias
|
| 62 |
+
if not use_peephole:
|
| 63 |
+
wci = wcf = wco = 0
|
| 64 |
+
i = sigmoid(cs_prev .* wci + i)
|
| 65 |
+
f = sigmoid(cs_prev .* wcf + f)
|
| 66 |
+
ci = tanh(ci)
|
| 67 |
+
cs = ci .* i + cs_prev .* f
|
| 68 |
+
cs = clip(cs, cell_clip)
|
| 69 |
+
o = sigmoid(cs * wco + o)
|
| 70 |
+
co = tanh(cs)
|
| 71 |
+
h = co .* o
|
| 72 |
+
```
|
| 73 |
+
"""
|
| 74 |
+
nodes = []
|
| 75 |
+
x, cs_prev, h_prev, w, wci, wcf, wco, b = node.input
|
| 76 |
+
forget_bias = float(node.get_attr("forget_bias").f)
|
| 77 |
+
cell_clip = float(node.get_attr("cell_clip").f)
|
| 78 |
+
use_peephole = bool(node.get_attr("use_peephole").i)
|
| 79 |
+
|
| 80 |
+
def make_sigmoid(i, w, b):
|
| 81 |
+
i_w_node = ctx.make_node("Mul", [i, w])
|
| 82 |
+
i_w_b_node = ctx.make_node("Add", [i_w_node.output[0], b])
|
| 83 |
+
output_node = ctx.make_node("Sigmoid", [i_w_b_node.output[0]])
|
| 84 |
+
nodes.extend([i_w_node, i_w_b_node, output_node])
|
| 85 |
+
return output_node.output[0]
|
| 86 |
+
|
| 87 |
+
# xh = [x, h]
|
| 88 |
+
xh_node = ctx.make_node("Concat", [x, h_prev], attr={"axis": 1})
|
| 89 |
+
|
| 90 |
+
# i, ci, f, o = xh * w + b
|
| 91 |
+
xh_w_node = ctx.make_node("MatMul", [xh_node.output[0], w])
|
| 92 |
+
w_shape = ctx.get_shape(w)
|
| 93 |
+
if len(w_shape) != 2 or w_shape[1] % 4 != 0:
|
| 94 |
+
raise RuntimeError("shape of W of LSTMBlockCell {} should be times of 4".format(node.name))
|
| 95 |
+
merged_output_node = ctx.make_node("Add", [xh_w_node.output[0], b])
|
| 96 |
+
w_last_dim = int(w_shape[1] / 4)
|
| 97 |
+
split_output_node = ctx.make_node(
|
| 98 |
+
"Split", [merged_output_node.output[0]],
|
| 99 |
+
attr={"axis": 1},
|
| 100 |
+
output_count=4
|
| 101 |
+
)
|
| 102 |
+
i, ci, f, o = split_output_node.output
|
| 103 |
+
|
| 104 |
+
# f = f + forget_bias
|
| 105 |
+
forget_bias_const = ctx.make_const(
|
| 106 |
+
utils.make_name("{}__forget_bias".format(node.name)),
|
| 107 |
+
np.array(forget_bias, dtype=np.float32)
|
| 108 |
+
)
|
| 109 |
+
f_node = ctx.make_node("Add", [f, forget_bias_const.output[0]])
|
| 110 |
+
|
| 111 |
+
if not use_peephole:
|
| 112 |
+
zeros_const = ctx.make_const(
|
| 113 |
+
utils.make_name("{}__zeros_const".format(node.name)),
|
| 114 |
+
np.zeros([w_last_dim], dtype=np.float32)
|
| 115 |
+
)
|
| 116 |
+
nodes.append(zeros_const)
|
| 117 |
+
wci = zeros_const.output[0]
|
| 118 |
+
wcf = zeros_const.output[0]
|
| 119 |
+
wco = zeros_const.output[0]
|
| 120 |
+
|
| 121 |
+
# i = sigmoid(cs_prev .* wci + i)
|
| 122 |
+
i = make_sigmoid(cs_prev, wci, i)
|
| 123 |
+
# f = sigmoid(cs_prev .* wcf + f)
|
| 124 |
+
f = make_sigmoid(cs_prev, wcf, f_node.output[0])
|
| 125 |
+
# ci = Tanh(ci)
|
| 126 |
+
ci_node = ctx.make_node("Tanh", [ci])
|
| 127 |
+
# cs = ci .* i + f .* cs_prev
|
| 128 |
+
ci_i_node = ctx.make_node("Mul", [ci_node.output[0], i])
|
| 129 |
+
cs_prev_f_node = ctx.make_node("Mul", [cs_prev, f])
|
| 130 |
+
cs_node = ctx.make_node("Add", [ci_i_node.output[0], cs_prev_f_node.output[0]])
|
| 131 |
+
cs = cs_node.output[0]
|
| 132 |
+
# cs = clip(cs)
|
| 133 |
+
if cell_clip > 0:
|
| 134 |
+
if ctx.opset < 11:
|
| 135 |
+
cs_clip_node = ctx.make_node("Clip", [cs], attr={"max": cell_clip, "min": -cell_clip})
|
| 136 |
+
nodes.append(cs_clip_node)
|
| 137 |
+
cs = cs_clip_node.output[0]
|
| 138 |
+
else:
|
| 139 |
+
dtype = utils.map_onnx_to_numpy_type(ctx.get_dtype(cs))
|
| 140 |
+
name_min = utils.make_name("{}_min".format(node.name))
|
| 141 |
+
name_max = utils.make_name("{}_max".format(node.name))
|
| 142 |
+
min_const = ctx.make_const(name_min, np.array(-cell_clip, dtype=dtype))
|
| 143 |
+
max_const = ctx.make_const(name_max, np.array(cell_clip, dtype=dtype))
|
| 144 |
+
cs_clip_node = ctx.make_node('Clip', [cs, min_const.output[0], max_const.output[0]])
|
| 145 |
+
nodes.append(cs_clip_node)
|
| 146 |
+
cs = cs_clip_node.output[0]
|
| 147 |
+
|
| 148 |
+
# o = cs * wco + o
|
| 149 |
+
o = make_sigmoid(cs, wco, o)
|
| 150 |
+
# co = Tanh(cs)
|
| 151 |
+
co_node = ctx.make_node("Tanh", [cs])
|
| 152 |
+
# h = co .* o
|
| 153 |
+
h_node = ctx.make_node("Mul", [co_node.output[0], o])
|
| 154 |
+
|
| 155 |
+
def replace_output(old_output, new_output):
|
| 156 |
+
ctx.replace_all_inputs(old_output, new_output) # ops=ctx.get_nodes()
|
| 157 |
+
ctx.copy_dtype(old_output, new_output)
|
| 158 |
+
ctx.copy_shape(old_output, new_output)
|
| 159 |
+
|
| 160 |
+
replace_output(node.output[0], i)
|
| 161 |
+
replace_output(node.output[1], cs)
|
| 162 |
+
replace_output(node.output[2], f)
|
| 163 |
+
replace_output(node.output[3], o)
|
| 164 |
+
replace_output(node.output[4], ci_node.output[0])
|
| 165 |
+
replace_output(node.output[5], co_node.output[0])
|
| 166 |
+
replace_output(node.output[6], h_node.output[0])
|
| 167 |
+
|
| 168 |
+
@classmethod
|
| 169 |
+
def version_7(cls, ctx, node, **kwargs):
|
| 170 |
+
cls.version_1(ctx, node, **kwargs)
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
@tf_op("CudnnRNN")
|
| 174 |
+
class CudnnRNN:
|
| 175 |
+
@classmethod
|
| 176 |
+
def version_10(cls, ctx, node, **kwargs):
|
| 177 |
+
x = node.input[0]
|
| 178 |
+
x_shape = ctx.get_shape(x)
|
| 179 |
+
h = node.input[1]
|
| 180 |
+
h_shape = ctx.get_shape(h)
|
| 181 |
+
p = node.input[3]
|
| 182 |
+
utils.make_sure(
|
| 183 |
+
node.attr["rnn_mode"].s == b"gru",
|
| 184 |
+
"rnn mode other than gru are not supported yet"
|
| 185 |
+
)
|
| 186 |
+
utils.make_sure(
|
| 187 |
+
node.attr["dropout"].f == 0,
|
| 188 |
+
"dropout not supported yet"
|
| 189 |
+
)
|
| 190 |
+
utils.make_sure(
|
| 191 |
+
node.attr["input_mode"].s == b"linear_input",
|
| 192 |
+
"input mode must be linear input"
|
| 193 |
+
)
|
| 194 |
+
num_dirs = 1 if node.attr["direction"].s == b"unidirectional" else 2
|
| 195 |
+
num_layers = int(h_shape[0] / num_dirs)
|
| 196 |
+
num_units = hidden_size = h_shape[2]
|
| 197 |
+
input_size = x_shape[2]
|
| 198 |
+
w_shape = [num_layers * num_dirs, 3 * hidden_size, input_size]
|
| 199 |
+
w_shape_const = ctx.make_const(utils.make_name("w_shape"), np.array(w_shape, dtype=np.int64))
|
| 200 |
+
r_shape = [num_layers * num_dirs, 3 * hidden_size, hidden_size]
|
| 201 |
+
r_shape_const = ctx.make_const(utils.make_name("r_shape"), np.array(r_shape, dtype=np.int64))
|
| 202 |
+
b_shape = [num_layers * num_dirs, 6 * hidden_size]
|
| 203 |
+
b_shape_const = ctx.make_const(utils.make_name("b_shape"), np.array(b_shape, dtype=np.int64))
|
| 204 |
+
zero_const = ctx.make_const(utils.make_name("zero"), np.array([0], dtype=np.int64))
|
| 205 |
+
w_end = np.prod(w_shape)
|
| 206 |
+
w_end_const = ctx.make_const(utils.make_name("w_end"), np.array([w_end], dtype=np.int64))
|
| 207 |
+
r_end = w_end + np.prod(r_shape)
|
| 208 |
+
r_end_const = ctx.make_const(utils.make_name("r_end"), np.array([r_end], dtype=np.int64))
|
| 209 |
+
b_end = r_end + np.prod(b_shape)
|
| 210 |
+
b_end_const = ctx.make_const(utils.make_name("b_end"), np.array([b_end], dtype=np.int64))
|
| 211 |
+
|
| 212 |
+
def name(nm):
|
| 213 |
+
return node.name + "_" + nm
|
| 214 |
+
|
| 215 |
+
ws = [name('W_' + str(i)) for i in range(num_layers * num_dirs)]
|
| 216 |
+
rs = [name('R_' + str(i)) for i in range(num_layers * num_dirs)]
|
| 217 |
+
bs = [name('B_' + str(i)) for i in range(num_layers * num_dirs)]
|
| 218 |
+
hs = [name('H_' + str(i)) for i in range(num_layers * num_dirs)]
|
| 219 |
+
yhs = [name('YH_' + str(i)) for i in range(num_layers * num_dirs)]
|
| 220 |
+
w_flattened = ctx.make_node('Slice', [p, zero_const.output[0], w_end_const.output[0]])
|
| 221 |
+
r_flattened = ctx.make_node('Slice', [p, w_end_const.output[0], r_end_const.output[0]])
|
| 222 |
+
b_flattened = ctx.make_node('Slice', [p, r_end_const.output[0], b_end_const.output[0]])
|
| 223 |
+
w = utils.make_name('W')
|
| 224 |
+
r = utils.make_name('R')
|
| 225 |
+
b = utils.make_name('B')
|
| 226 |
+
ctx.make_node('Reshape', [w_flattened.output[0], w_shape_const.output[0]], outputs=[w])
|
| 227 |
+
ctx.make_node('Reshape', [r_flattened.output[0], r_shape_const.output[0]], outputs=[r])
|
| 228 |
+
ctx.make_node('Reshape', [b_flattened.output[0], b_shape_const.output[0]], outputs=[b])
|
| 229 |
+
ctx.make_node('Split', [w], outputs=ws)
|
| 230 |
+
ctx.make_node('Split', [r], outputs=rs)
|
| 231 |
+
ctx.make_node('Split', [b], outputs=bs)
|
| 232 |
+
ctx.make_node('Split', [h], outputs=hs)
|
| 233 |
+
|
| 234 |
+
builder = GraphBuilder(ctx)
|
| 235 |
+
|
| 236 |
+
xnf = xnb = x
|
| 237 |
+
for i in range(num_layers):
|
| 238 |
+
suffix = '_' + str(i * num_dirs)
|
| 239 |
+
ctx.make_node('GRU',
|
| 240 |
+
[xnf, name('W' + suffix), name('R' + suffix), name('B' + suffix), '', name('H' + suffix)],
|
| 241 |
+
outputs=[name('Y' + suffix), name('YH' + suffix)],
|
| 242 |
+
attr={'direction': 'forward', 'hidden_size': num_units})
|
| 243 |
+
xnf = name(x + suffix)
|
| 244 |
+
builder.make_squeeze({'data': name('Y' + suffix), 'outputs': [xnf], 'axes': [1]})
|
| 245 |
+
if num_dirs == 2:
|
| 246 |
+
suffix = '_' + str(i * 2 + 1)
|
| 247 |
+
ctx.make_node('GRU',
|
| 248 |
+
[xnb, name('W' + suffix), name('R' + suffix), name('B' + suffix), '', name('H' + suffix)],
|
| 249 |
+
outputs=[name('Y' + suffix), name('YH' + suffix)],
|
| 250 |
+
attr={'direction': 'reverse', 'hidden_size': num_units})
|
| 251 |
+
xnb = name(x + suffix)
|
| 252 |
+
builder.make_squeeze({'data': name('Y' + suffix), 'outputs': [xnb], 'axes': [1]})
|
| 253 |
+
ctx.remove_node(node.name)
|
| 254 |
+
if num_dirs == 2:
|
| 255 |
+
ctx.make_node('Concat', [xnf, xnb], outputs=[node.output[0]], attr={'axis': -1})
|
| 256 |
+
else:
|
| 257 |
+
ctx.make_node('Identity', [xnf], outputs=[node.output[0]])
|
| 258 |
+
ctx.make_node('Concat', yhs, outputs=[node.output[1]], attr={'axis': 0})
|
| 259 |
+
|
| 260 |
+
@classmethod
|
| 261 |
+
def version_13(cls, ctx, node, **kwargs):
|
| 262 |
+
# Squeeze changed in Opset 13.
|
| 263 |
+
cls.version_10(ctx, node, **kwargs)
|
lib/python3.10/site-packages/tf2onnx/onnx_opset/signal.py
ADDED
|
@@ -0,0 +1,312 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
signal
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
from __future__ import unicode_literals
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
|
| 14 |
+
import numpy as np
|
| 15 |
+
from onnx import onnx_pb, helper
|
| 16 |
+
from onnx.numpy_helper import to_array
|
| 17 |
+
from tf2onnx import utils
|
| 18 |
+
from tf2onnx.handler import tf_op
|
| 19 |
+
from tf2onnx.graph_builder import GraphBuilder
|
| 20 |
+
|
| 21 |
+
logger = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# pylint: disable=unused-argument,missing-docstring
|
| 25 |
+
|
| 26 |
+
def make_dft_constant(length, dtype, fft_length):
    """Build the real/imaginary DFT coefficient matrix for an RFFT.

    Computes the `length x length` DFT matrix exp(-2j*pi*k*n/length),
    keeps only the first `fft_length // 2 + 1` rows (the non-redundant
    half of a real FFT), and returns it split into real and imaginary
    planes.

    :param length: size of the input signal (number of columns).
    :param dtype: numpy dtype of the returned array.
    :param fft_length: FFT length; determines how many rows are kept.
    :return: array of shape (2, fft_length // 2 + 1, length) where
        index 0 holds the real part and index 1 the imaginary part.
    """
    samples = np.arange(length)
    # Column vector of frequency indices; outer product with `samples`
    # gives the k*n exponent grid.
    freqs = samples.reshape((length, 1)).astype(np.float64)
    dft_mat = np.exp(-2j * np.pi * freqs * samples / length)[:fft_length // 2 + 1]
    planes = np.empty((2,) + dft_mat.shape, dtype=dtype)
    planes[0] = dft_mat.real
    planes[1] = dft_mat.imag
    return planes
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class CommonFFTOp:
|
| 38 |
+
@classmethod
|
| 39 |
+
def any_version(cls, const_length, opset, ctx, node, **kwargs):
|
| 40 |
+
"""
|
| 41 |
+
Inspired from `Python implementation of RFFT
|
| 42 |
+
<https://jakevdp.github.io/blog/2013/08/28/understanding-the-fft/>`_.
|
| 43 |
+
|
| 44 |
+
Complex version:
|
| 45 |
+
|
| 46 |
+
::
|
| 47 |
+
|
| 48 |
+
import numpy as np
|
| 49 |
+
|
| 50 |
+
def _DFT_cst(N, fft_length):
|
| 51 |
+
n = np.arange(N)
|
| 52 |
+
k = n.reshape((N, 1)).astype(np.float64)
|
| 53 |
+
M = np.exp(-2j * np.pi * k * n / N)
|
| 54 |
+
return M[:fft_length // 2 + 1]
|
| 55 |
+
|
| 56 |
+
def DFT(x, fft_length=None):
|
| 57 |
+
if len(x.shape) == 1:
|
| 58 |
+
x = x.reshape((-1, 1))
|
| 59 |
+
else:
|
| 60 |
+
x = x.T
|
| 61 |
+
if fft_length is None:
|
| 62 |
+
fft_length = x.shape[0]
|
| 63 |
+
cst = _DFT_cst(x.shape[0], fft_length)
|
| 64 |
+
return np.dot(cst, x).T
|
| 65 |
+
|
| 66 |
+
Real version, first axis is (real, imag) part:
|
| 67 |
+
|
| 68 |
+
::
|
| 69 |
+
|
| 70 |
+
import numpy as np
|
| 71 |
+
|
| 72 |
+
def _DFT_real_cst(N, fft_length):
|
| 73 |
+
n = np.arange(N)
|
| 74 |
+
k = n.reshape((N, 1)).astype(np.float64)
|
| 75 |
+
M = np.exp(-2j * np.pi * k * n / N)
|
| 76 |
+
M = M[:fft_length // 2 + 1]
|
| 77 |
+
both = np.empty((2,) + M.shape)
|
| 78 |
+
both[0, :, :] = np.real(M)
|
| 79 |
+
both[1, :, :] = np.imag(M)
|
| 80 |
+
return both
|
| 81 |
+
|
| 82 |
+
def DFT_real(x, fft_length=None):
|
| 83 |
+
if len(x.shape) == 1:
|
| 84 |
+
x = x.reshape((-1, 1))
|
| 85 |
+
else:
|
| 86 |
+
x = x.T
|
| 87 |
+
if fft_length is None:
|
| 88 |
+
fft_length = x.shape[0]
|
| 89 |
+
cst = _DFT_real_cst(x.shape[0], fft_length)
|
| 90 |
+
res = np.dot(cst, x)
|
| 91 |
+
return np.transpose(res, (0, 2, 1))
|
| 92 |
+
"""
|
| 93 |
+
supported_dtypes = [
|
| 94 |
+
onnx_pb.TensorProto.FLOAT,
|
| 95 |
+
onnx_pb.TensorProto.FLOAT16,
|
| 96 |
+
onnx_pb.TensorProto.DOUBLE,
|
| 97 |
+
onnx_pb.TensorProto.COMPLEX64,
|
| 98 |
+
onnx_pb.TensorProto.COMPLEX128,
|
| 99 |
+
]
|
| 100 |
+
consumers = ctx.find_output_consumers(node.output[0])
|
| 101 |
+
consumer_types = set(op.type for op in consumers)
|
| 102 |
+
utils.make_sure(
|
| 103 |
+
consumer_types == {'ComplexAbs'},
|
| 104 |
+
"Current implementation of RFFT or FFT only allows ComplexAbs as consumer not %r",
|
| 105 |
+
consumer_types)
|
| 106 |
+
|
| 107 |
+
input_name = node.input[0]
|
| 108 |
+
onnx_dtype = ctx.get_dtype(input_name)
|
| 109 |
+
utils.make_sure(onnx_dtype in supported_dtypes, "Unsupported input type.")
|
| 110 |
+
shape = ctx.get_shape(node.input[0])
|
| 111 |
+
shape_n = shape[-1]
|
| 112 |
+
|
| 113 |
+
if onnx_dtype in (onnx_pb.TensorProto.COMPLEX64, onnx_pb.TensorProto.COMPLEX128):
|
| 114 |
+
parent = ctx.get_node_by_output_in_current_graph(node.input[0])
|
| 115 |
+
utils.make_sure(
|
| 116 |
+
parent.type == 'Cast' and parent.get_attr_value('to') == onnx_dtype,
|
| 117 |
+
"Current implementation of FFT or RFFT assumes the input is real or complex produced "
|
| 118 |
+
"by a node Cast just before this one.")
|
| 119 |
+
input_name = parent.input[0]
|
| 120 |
+
onnx_dtype = ctx.get_dtype(input_name)
|
| 121 |
+
|
| 122 |
+
np_dtype = utils.map_onnx_to_numpy_type(onnx_dtype)
|
| 123 |
+
|
| 124 |
+
if np_dtype == np.float16:
|
| 125 |
+
res_onnx_dtype = utils.map_numpy_to_onnx_dtype(np.float16)
|
| 126 |
+
np_dtype = np.float16
|
| 127 |
+
elif np_dtype in (np.float32, np.complex64):
|
| 128 |
+
res_onnx_dtype = utils.map_numpy_to_onnx_dtype(np.float32)
|
| 129 |
+
np_dtype = np.float32
|
| 130 |
+
else:
|
| 131 |
+
res_onnx_dtype = utils.map_numpy_to_onnx_dtype(np.float64)
|
| 132 |
+
np_dtype = np.float64
|
| 133 |
+
|
| 134 |
+
if const_length:
|
| 135 |
+
# RFFT: length of FFT is known, some computation
|
| 136 |
+
# (see function make_dft_constant)
|
| 137 |
+
# can be done at conversion time and stored as constant
|
| 138 |
+
utils.make_sure(len(node.input) == 2, "Two inputs expected not %r", len(node.input))
|
| 139 |
+
|
| 140 |
+
# This input should be a constant.
|
| 141 |
+
fft_length_name = node.input[1]
|
| 142 |
+
node_fft_length = ctx.get_node_by_output(fft_length_name, search_in_parent_graphs=True)
|
| 143 |
+
utils.make_sure(node_fft_length.type == 'Const',
|
| 144 |
+
"fft_length should be a constant, the other case is not implemented yet.")
|
| 145 |
+
value = node_fft_length.get_attr("value")
|
| 146 |
+
value_array = to_array(value.t)
|
| 147 |
+
utils.make_sure(value_array.shape == (1,), "Unexpected shape for fft_length (%r)", value_array.shape)
|
| 148 |
+
fft_length = value_array[0]
|
| 149 |
+
|
| 150 |
+
# TODO: handle this parameter when onnx.helper.make_node is fixed.
|
| 151 |
+
# Tcomplex = node.get_attr("Tcomplex")
|
| 152 |
+
|
| 153 |
+
real_imag_part = make_dft_constant(shape_n, np_dtype, fft_length)
|
| 154 |
+
onx_real_imag_part = ctx.make_const(
|
| 155 |
+
name=utils.make_name('cst_rfft_%d' % shape_n), np_val=real_imag_part)
|
| 156 |
+
onx_real_imag_part_name = onx_real_imag_part.name
|
| 157 |
+
else:
|
| 158 |
+
# FFT: length of FFT is unknown, the matrix
|
| 159 |
+
# created by function make_dft_constant must be
|
| 160 |
+
# done in ONNX.
|
| 161 |
+
dyn_shape_all = ctx.make_node("Shape", inputs=[input_name],
|
| 162 |
+
name=utils.make_name('CPLX_' + node.name + 'shape'))
|
| 163 |
+
m1_cst = ctx.make_const(name=utils.make_name('CPLX_m1'), np_val=np.array([-1], dtype=np.int64))
|
| 164 |
+
dyn_shape = ctx.make_node('Gather', inputs=[dyn_shape_all.output[0], m1_cst.name])
|
| 165 |
+
one_tensor = helper.make_tensor("value", res_onnx_dtype, dims=[1], vals=[1])
|
| 166 |
+
cst_1 = ctx.make_node("ConstantOfShape", inputs=[dyn_shape.output[0]], attr={"value": one_tensor})
|
| 167 |
+
just_0 = ctx.make_const(name=utils.make_name('CPLX1'), np_val=np.array([0], dtype=np.int64))
|
| 168 |
+
rng1 = ctx.make_node("CumSum", inputs=[cst_1.output[0], just_0.name],
|
| 169 |
+
name=utils.make_name('CPLX_' + node.name + 'range'))
|
| 170 |
+
p1_cst = ctx.make_const(name=utils.make_name('CPLX_p1'), np_val=np.array([1], dtype=np_dtype))
|
| 171 |
+
rng = ctx.make_node("Sub", inputs=[rng1.output[0], p1_cst.name],
|
| 172 |
+
name=utils.make_name('CPLX_' + node.name + 'range'))
|
| 173 |
+
resh_cst = ctx.make_const(name=utils.make_name('CPLX_reshape'), np_val=np.array([1, -1], dtype=np.int64))
|
| 174 |
+
rng_tr1 = ctx.make_node("Reshape", inputs=[rng.output[0], resh_cst.name],
|
| 175 |
+
name=utils.make_name('CPLX_' + node.name + 'range'))
|
| 176 |
+
resh_cst = ctx.make_const(name=utils.make_name('CPLX_reshape'), np_val=np.array([-1, 1], dtype=np.int64))
|
| 177 |
+
rng_tr2 = ctx.make_node("Reshape", inputs=[rng.output[0], resh_cst.name],
|
| 178 |
+
name=utils.make_name('CPLX_' + node.name + 'range'))
|
| 179 |
+
rng_mat = ctx.make_node('MatMul', inputs=[rng_tr2.output[0], rng_tr1.output[0]],
|
| 180 |
+
name=utils.make_name('CPLX_' + node.name + 'range2'))
|
| 181 |
+
pi_cst = ctx.make_const(name=utils.make_name('CPLX_pi'), np_val=np.array([np.pi * 2], dtype=np_dtype))
|
| 182 |
+
angle_pi = ctx.make_node("Mul", inputs=[rng_mat.output[0], pi_cst.name],
|
| 183 |
+
name=utils.make_name('CPLX_' + node.name + 'angle_pi'))
|
| 184 |
+
shape_cast = ctx.make_node('Cast', inputs=[dyn_shape.output[0]], attr={'to': res_onnx_dtype})
|
| 185 |
+
angle_pibn = ctx.make_node("Div", inputs=[angle_pi.output[0], shape_cast.output[0]],
|
| 186 |
+
name=utils.make_name('CPLX_' + node.name + 'angle'))
|
| 187 |
+
if opset >= 13:
|
| 188 |
+
angle = ctx.make_node("Unsqueeze", inputs=[angle_pibn.output[0], just_0.name],
|
| 189 |
+
name=utils.make_name('CPLX_' + node.name + 'angles'))
|
| 190 |
+
else:
|
| 191 |
+
angle = ctx.make_node("Unsqueeze", inputs=[angle_pibn.output[0]],
|
| 192 |
+
name=utils.make_name('CPLX_' + node.name + 'angles'),
|
| 193 |
+
attr={'axes': [0]})
|
| 194 |
+
rng_cos = ctx.make_node("Cos", inputs=[angle.output[0]],
|
| 195 |
+
name=utils.make_name('CPLX_' + node.name + 'cos'))
|
| 196 |
+
rng_sin = ctx.make_node("Sin", inputs=[angle.output[0]],
|
| 197 |
+
name=utils.make_name('CPLX_' + node.name + 'sin'))
|
| 198 |
+
onx_real_imag_part = ctx.make_node("Concat", inputs=[rng_cos.output[0], rng_sin.output[0]],
|
| 199 |
+
name=utils.make_name('CPLX_' + node.name + '_cst_fft'),
|
| 200 |
+
attr={'axis': 0})
|
| 201 |
+
onx_real_imag_part_name = onx_real_imag_part.output[0]
|
| 202 |
+
|
| 203 |
+
shapei = list(np.arange(len(shape)))
|
| 204 |
+
perm = shapei[:-2] + [shapei[-1], shapei[-2]]
|
| 205 |
+
trx = ctx.make_node(
|
| 206 |
+
"Transpose", inputs=[input_name], attr=dict(perm=perm),
|
| 207 |
+
name=utils.make_name(node.name + 'tr'))
|
| 208 |
+
|
| 209 |
+
ctx.remove_node(node.name)
|
| 210 |
+
mult = ctx.make_node(
|
| 211 |
+
"MatMul", inputs=[onx_real_imag_part_name, trx.output[0]],
|
| 212 |
+
name=utils.make_name('CPLX_' + node.name + 'rfft'))
|
| 213 |
+
|
| 214 |
+
new_shape = [2] + list(shape)
|
| 215 |
+
shapei = list(np.arange(len(new_shape)))
|
| 216 |
+
perm = shapei[:-2] + [shapei[-1], shapei[-2]]
|
| 217 |
+
last_node = ctx.make_node(
|
| 218 |
+
"Transpose", inputs=[mult.output[0]], attr=dict(perm=perm),
|
| 219 |
+
name=utils.make_name('CPLX_' + node.name + 'rfft'),
|
| 220 |
+
shapes=[new_shape], dtypes=[res_onnx_dtype])
|
| 221 |
+
|
| 222 |
+
ctx.replace_all_inputs(node.output[0], last_node.output[0]) # ops=ctx.get_nodes()
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
@tf_op("RFFT")
class RFFTOp(CommonFFTOp):
    """Converts tf.signal.RFFT by delegating to the shared FFT handler.

    The leading ``True`` argument selects the const-length path: RFFT
    carries an explicit fft_length input, so the DFT matrix can be
    precomputed and stored as a constant at conversion time.
    """

    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        return cls.any_version(True, 1, ctx, node, **kwargs)
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
@tf_op("FFT")
class FFTOp(CommonFFTOp):
    """Converts tf.signal.FFT via the shared FFT handler.

    FFT has no fft_length input (const_length=False), so the DFT matrix
    must be built with ONNX ops at runtime.
    """

    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        return cls.any_version(False, 1, ctx, node, **kwargs)

    @classmethod
    def version_13(cls, ctx, node, **kwargs):
        # Opset 13 moved Unsqueeze axes from attribute to input; the
        # shared handler branches on the opset value it receives.
        return cls.any_version(False, 13, ctx, node, **kwargs)
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
@tf_op("ComplexAbs")
class ComplexAbsOp:
    # support more dtype
    # Handler for ComplexAbs: the complex input is expected to be encoded
    # as a real tensor whose leading dimension of size 2 stacks the real
    # part (index 0) and the imaginary part (index 1).

    @classmethod
    def any_version(cls, opset, ctx, node, **kwargs):
        """
        Computes the modulus of a complex tensor as sqrt(re**2 + im**2)
        using standard ONNX ops.
        If the matrix dtype is not complex64 or complex128,
        it assumes the first dimension means real part (0)
        and imaginary part (1, :, :...).

        :param opset: target opset (kept for parity with other handlers).
        :param ctx: tf2onnx graph being rewritten.
        :param node: the ComplexAbs node to replace.
        """
        supported_dtypes = [
            onnx_pb.TensorProto.FLOAT,
            onnx_pb.TensorProto.FLOAT16,
            onnx_pb.TensorProto.DOUBLE,
            onnx_pb.TensorProto.COMPLEX64,
            onnx_pb.TensorProto.COMPLEX128,
        ]
        onnx_dtype = ctx.get_dtype(node.input[0])
        utils.make_sure(onnx_dtype in supported_dtypes, "Unsupported input type.")
        shape = ctx.get_shape(node.input[0])
        np_dtype = utils.map_onnx_to_numpy_type(onnx_dtype)
        # Leading dimension must stack [real, imag].
        utils.make_sure(shape[0] == 2, "ComplexAbs expected the first dimension to be 2 but shape is %r", shape)

        # Constant indices used to gather the real/imag planes, and the
        # exponent used to square them.
        ind0 = ctx.make_const(name=utils.make_name('cst0'), np_val=np.array([0], dtype=np.int64))
        ind1 = ctx.make_const(name=utils.make_name('cst1'), np_val=np.array([1], dtype=np.int64))
        p2 = ctx.make_const(name=utils.make_name('p2'), np_val=np.array([2], dtype=np_dtype))

        # Split the stacked tensor into its real and imaginary planes.
        real_part = ctx.make_node(
            'Gather', inputs=[node.input[0], ind0.name], attr=dict(axis=0),
            name=utils.make_name('Real_' + node.name))
        imag_part = ctx.make_node(
            'Gather', inputs=[node.input[0], ind1.name], attr=dict(axis=0),
            name=utils.make_name('Imag_' + node.name))

        # Square each plane.
        real_part2 = ctx.make_node(
            'Pow', inputs=[real_part.output[0], p2.name],
            name=utils.make_name(real_part.name + 'p2p'))

        imag_part2 = ctx.make_node(
            'Pow', inputs=[imag_part.output[0], p2.name],
            name=utils.make_name(imag_part.name + 'p2p'))

        # Remove the original node before wiring in its replacement.
        ctx.remove_node(node.name)
        add = ctx.make_node(
            "Add", inputs=[real_part2.output[0], imag_part2.output[0]],
            name=utils.make_name('ComplexAbs_' + node.name))

        # Gather with a [0]/[1] index tensor keeps a size-1 leading axis;
        # drop it before the final Sqrt.
        squeezed = GraphBuilder(ctx).make_squeeze(
            {'data': add.output[0], 'axes': [0]}, name=utils.make_name('ComplexAbs' + node.name), return_node=True)

        last_node = ctx.make_node(
            "Sqrt", inputs=squeezed.output[:1],
            name=utils.make_name('ComplexAbs' + node.name),
            shapes=[shape[1:]], dtypes=[onnx_dtype])

        # Redirect every consumer of the old output to the new result.
        ctx.replace_all_inputs(node.output[0], last_node.output[0])  # ops=ctx.get_nodes()

    @classmethod
    def version_1(cls, ctx, node, **kwargs):
        cls.any_version(1, ctx, node, **kwargs)

    @classmethod
    def version_13(cls, ctx, node, **kwargs):
        cls.any_version(13, ctx, node, **kwargs)
|
lib/python3.10/site-packages/tf2onnx/onnx_opset/tensor.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
lib/python3.10/site-packages/tf2onnx/onnx_opset/traditionalml.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
traditional ml
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
from __future__ import unicode_literals
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
|
| 14 |
+
logger = logging.getLogger(__name__)
|
lib/python3.10/site-packages/tf2onnx/optimizer/__init__.py
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
"""tf2onnx.optimizer module"""
|
| 4 |
+
|
| 5 |
+
from __future__ import division
|
| 6 |
+
from __future__ import print_function
|
| 7 |
+
from __future__ import unicode_literals
|
| 8 |
+
|
| 9 |
+
from collections import OrderedDict
|
| 10 |
+
import copy
|
| 11 |
+
|
| 12 |
+
from .const_fold_optimizer import ConstFoldOptimizer
|
| 13 |
+
from .identity_optimizer import IdentityOptimizer
|
| 14 |
+
from .merge_duplicated_nodes_optimizer import MergeDuplicatedNodesOptimizer
|
| 15 |
+
from .transpose_optimizer import TransposeOptimizer
|
| 16 |
+
from .loop_optimizer import LoopOptimizer
|
| 17 |
+
from .back_to_back_optimizer import BackToBackOptimizer
|
| 18 |
+
from .upsample_optimizer import UpsampleOptimizer
|
| 19 |
+
from .const_dequantize_optimizer import ConstDequantizeOptimizer
|
| 20 |
+
from .. import logging
|
| 21 |
+
|
| 22 |
+
# optimizer sequence need to be considered carefully: each pass may create
# (or remove) opportunities for the passes that follow it.
_optimizers = OrderedDict([
    ("optimize_transpose", TransposeOptimizer),
    ("remove_redundant_upsample", UpsampleOptimizer),
    ("fold_constants", ConstFoldOptimizer),
    ("const_dequantize_optimizer", ConstDequantizeOptimizer),
    ("loop_optimizer", LoopOptimizer),
    # merge_duplication should be used after optimize_transpose
    # for optimize_transpose may have some trans nodes that can be merge
    ("merge_duplication", MergeDuplicatedNodesOptimizer),
    ("remove_identity", IdentityOptimizer),
    ("remove_back_to_back", BackToBackOptimizer),
])


def _get_optimizers():
    """Return the ordered name -> optimizer-class registry."""
    return _optimizers
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def optimize_graph(graph, catch_errors=True):
    """Run every registered optimizer over ``graph`` until a full pass
    makes no further progress, then return the optimized graph.

    :param graph: tf2onnx Graph to optimize.
    :param catch_errors: when True each optimizer works on a deep copy, and
        any exception it raises is logged and skipped instead of propagating.
    """
    logger = logging.getLogger(__name__)
    logger.info("Optimizing ONNX model")

    stats_before = graph.dump_node_statistics()
    optimizer_map = _get_optimizers()
    made_progress = True
    while made_progress:
        made_progress = False
        for opt_name, opt_factory in optimizer_map.items():
            logger.verbose("Apply %s", opt_name)
            if catch_errors:
                try:
                    # Work on a snapshot so a failing optimizer cannot
                    # leave the graph half-rewritten.
                    snapshot = copy.deepcopy(graph)
                    optimizer = opt_factory()
                    graph = optimizer.optimize(snapshot) or graph
                    made_progress = made_progress or optimizer.graph_been_opt
                except Exception:  # pylint: disable=broad-except
                    # A broken optimizer must not abort the whole pass.
                    logger.warning("Failed to apply %s", opt_name, exc_info=1)
            else:
                optimizer = opt_factory()
                graph = optimizer.optimize(graph)
                made_progress = made_progress or optimizer.graph_been_opt

    try:
        graph.topological_sort(graph.get_nodes())
    except Exception:  # pylint: disable=broad-except
        logger.warning("Failed topological_sort", exc_info=1)

    # Report per-op-type node-count changes, e.g. "Transpose -3 (5->2)".
    stats_after = graph.dump_node_statistics()
    delta = copy.deepcopy(stats_after)
    delta.subtract(stats_before)
    changes = ["{} {} ({}->{})".format(op, str(count) if count < 0 else '+' + str(count),
                                       stats_before.get(op, 0), stats_after.get(op, 0))
               for op, count in sorted(delta.items()) if count != 0]
    logger.info("After optimization: %s", ', '.join(changes) if changes else "no change")

    return graph
|
lib/python3.10/site-packages/tf2onnx/optimizer/back_to_back_optimizer.py
ADDED
|
@@ -0,0 +1,254 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""Back_To_Back Optimizer.
|
| 5 |
+
Collapse consecutive nodes into 1 node if possible.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import unicode_literals
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
from tf2onnx.utils import ONNX_DTYPE_NAMES # lgtm[py/unsafe-cyclic-import]
|
| 12 |
+
from .optimizer_base import GraphOptimizerBase # lgtm[py/unsafe-cyclic-import]
|
| 13 |
+
|
| 14 |
+
# pylint: disable=logging-not-lazy,unused-argument,missing-docstring,unused-variable,arguments-differ
|
| 15 |
+
|
| 16 |
+
_func_map = {}
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def _register_func(op_type):
|
| 20 |
+
def _internal_fun(func):
|
| 21 |
+
_func_map[op_type] = func
|
| 22 |
+
return func
|
| 23 |
+
|
| 24 |
+
return _internal_fun
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class BackToBackOptimizer(GraphOptimizerBase):
    """Remove back-to-back nodes e.g. 'Cast'

    Each handler in ``_func_map`` collapses a producer node of a given op
    type into its consumer nodes of the same (or a paired) op type.
    """

    def __init__(self):  # pylint: disable=useless-super-delegation
        super(BackToBackOptimizer, self).__init__()

    def _optimize(self, graph):
        # Apply the per-graph pass to the main graph and its subgraphs.
        return self._apply_optimization(graph, self._optimize_at_current_graph_level)

    def _optimize_at_current_graph_level(self, g):
        """Run every registered handler over graph ``g`` and return ``g``."""
        for optype, handler in _func_map.items():
            # candidate nodes for removal/optimization
            # NOTE: `in` is substring matching when optype is a string
            # (e.g. "Cast") and membership testing when it is a tuple.
            nodes = [n for n in g.get_nodes() if n.type in optype]

            # topological sort of candidates
            # simplifying assumption for back-to-back-optimizer is
            # the op_types have 1 input, 1 output, but multiple consumers
            has_dependencies = set()
            consumer_node_ids = {n.output[0]: [] for n in nodes}
            for n in nodes:
                if n.input[0] in consumer_node_ids:
                    consumer_node_ids[n.input[0]].extend([n])
                    has_dependencies.add(n.output[0])

            # q = starting nodes with no dependencies
            q = list(set(consumer_node_ids.keys()) - has_dependencies)
            while q:
                nodeid = q.pop(0)
                node = g.get_node_by_output(nodeid, False)
                consumer_nodes = consumer_node_ids[nodeid]

                if len(consumer_nodes) > 0:
                    all_consumers = g.find_output_consumers(node.output[0])
                    if len(all_consumers) != len(consumer_nodes):
                        # if first node is used elsewhere, skip
                        continue
                    if set(node.output) & set(g.outputs):
                        # if this node is part of graph outputs, skip
                        continue
                    q2 = handler(g, node, consumer_nodes)
                    # add more nodes which can now be processed
                    q.extend(q2)
        return g

    @staticmethod
    @_register_func("Cast")
    def _optimize_cast(g, node, consumer_nodes):
        """remove long chains of cast ops"""
        q2 = []
        type1 = node.get_attr('to').i
        type1_name = ONNX_DTYPE_NAMES[type1] if type1 in ONNX_DTYPE_NAMES else ''

        # if parent node is cast node, and same type, delete this one
        pnode = node.inputs[0]
        if pnode.type == 'Cast':
            type2 = pnode.get_attr('to').i
            if type1 == type2:
                for node2 in consumer_nodes:
                    g.replace_input(node2, node2.input[0], node.input[0], 0)
                    q2.append(node2.output[0])
                g.remove_node(node.name)
                return q2

        # otherwise, check consumer cast nodes for a target type
        # that contains more information than current type
        can_reduce = True
        for node2 in consumer_nodes:
            type2 = node2.get_attr('to').i
            type2_name = ONNX_DTYPE_NAMES[type2] if type2 in ONNX_DTYPE_NAMES else ''

            if 'float' in type1_name or type1_name == 'double':
                # high information type. ok to eliminate
                pass
            elif 'int' in type1_name:
                # int* and uint* are mix of high and low information.
                # for safety, keep the current node, unless type2 is bool,
                # in which case it's ok to remove node
                if type1 != type2 and type2_name != 'bool':
                    can_reduce = False
            elif type1_name == 'bool':
                # bool is low information, so don't eliminate
                if type1 != type2:
                    can_reduce = False
            elif type1_name == 'string':
                # can always remove string
                pass
            else:
                # some odd type, keep node
                can_reduce = False
            q2.append(node2.output[0])

        if can_reduce:
            for node2 in consumer_nodes:
                g.replace_input(node2, node2.input[0], node.input[0], 0)
            g.remove_node(node.name)
        return q2

    @staticmethod
    @_register_func("Transpose")
    def _optimize_transpose(g, node, consumer_nodes):
        """remove long chains of transpose ops"""
        t1 = list(node.get_attr('perm').ints)
        q2 = []
        for node2 in consumer_nodes:
            g.replace_input(node2, node2.input[0], node.input[0], 0)
            t2 = list(node2.get_attr('perm').ints)
            # Compose the two permutations.
            new_perm = [t1[i] for i in t2]
            # check if node2 can be removed. otherwise only update
            if new_perm == list(range(len(t2))):
                # both nodes can be deleted
                shape = g.get_shape(node2.output[0])
                dtype = g.get_dtype(node2.output[0])
                node2_consumers = g.find_output_consumers(node2.output[0])
                g.replace_all_inputs(node2.output[0], node.input[0], ops=node2_consumers)
                g.remove_node(node2.name)
                if set(node2.output) & set(g.outputs):
                    # preserve the graph-output name with an Identity node
                    g.make_node("Identity", [node.input[0]],
                                outputs=node2.output, shapes=[shape], dtypes=[dtype])
            else:
                node2.set_attr('perm', [t1[i] for i in t2])
                q2.append(node2.output[0])
        g.remove_node(node.name)
        return q2

    @staticmethod
    @_register_func(('Squeeze', 'Unsqueeze'))
    def _optimize_squeeze_unsqueeze(g, node, consumer_nodes):
        """remove pairs of squeeze-unsqueeze nodes"""
        if node.type != 'Squeeze' or len(consumer_nodes) != 1:
            # no need to return any value, since not removing long chain of nodes
            return []

        node2 = consumer_nodes[0]
        if node2.type != 'Unsqueeze':
            return []

        axes_match = False
        if g.opset <= 12 and node.get_attr('axes').ints == node2.get_attr('axes').ints:
            axes_match = True

        # In opset 13, axes is an input. Optional for squeeze op.
        if g.opset >= 13 and len(node.input) == 2:
            if node.input[1] == node2.input[1]:
                axes_match = True
            elif node.inputs[1].is_const() and node2.inputs[1].is_const() and \
                    node.inputs[1].get_tensor_value(as_list=True) == node2.inputs[1].get_tensor_value(as_list=True):
                axes_match = True

        # if squeeze followed by unsqueeze is on diff axes, skip
        if not axes_match:
            return []

        # if unsqueeze output is graph output, skip
        if set(node2.output) & set(g.outputs):
            return []

        node2_consumers = g.find_output_consumers(node2.output[0])
        g.replace_all_inputs(node2.output[0], node.input[0], ops=node2_consumers)
        g.remove_node(node.name)
        g.remove_node(node2.name)
        return []

    @staticmethod
    @_register_func(('Conv', 'BatchNormalization'))
    def _optimize_conv_batchnorm_fusion(g, node, consumer_nodes):
        """fuse conv and batchnorm"""
        if node.type != 'Conv' or len(consumer_nodes) != 1:
            # can only fuse 1 conv + batchnorm
            return []

        node2 = consumer_nodes[0]
        if node2.type != 'BatchNormalization':
            return []

        # if batchnorm is a graph output, skip
        if set(node2.output) & set(g.outputs):
            return []

        if not node.inputs[1].is_const():
            return []
        weights = node.inputs[1].get_tensor_value(as_list=False)
        # if not 4D, NCHW skip
        if len(weights.shape) != 4:
            return []

        # optional bias value
        if len(node.inputs) > 2:
            if not node.inputs[2].is_const():
                return []
            bias = node.inputs[2].get_tensor_value(as_list=False)
        else:
            bias = np.array(0, dtype=weights.dtype)

        # scale, offset, mean, var be const, otherwise skip
        if False in [node2.inputs[i].is_const() for i in [1, 2, 3, 4]]:
            return []

        # if bn outputs used elsewhere, cannot fuse
        for i in range(1, len(node2.output)):
            if g.find_output_consumers(node2.output[i]):
                return []

        # Move the first weight axis last so the per-channel factor
        # broadcasts over it, then transpose back after scaling.
        weights = weights.transpose(2, 3, 1, 0)
        scale = node2.inputs[1].get_tensor_value(as_list=False)
        offset = node2.inputs[2].get_tensor_value(as_list=False)
        mean = node2.inputs[3].get_tensor_value(as_list=False)
        var = node2.inputs[4].get_tensor_value(as_list=False)
        epsilon = node2.get_attr('epsilon').f

        # Fold BN into the conv: y = scale*(x - mean)/sqrt(var + eps) + offset
        scale_new = scale / np.sqrt(var + epsilon)
        weights_new = weights * scale_new
        weights_new = weights_new.transpose(3, 2, 0, 1)
        bias_new = (bias - mean) * scale_new + offset
        bias_new_const = g.make_const(node.name + '_bias_fused_bn', bias_new.astype(bias.dtype))
        weights_new_const = g.make_const(node.name + '_weights_fused_bn', weights_new.astype(weights.dtype))
        g.replace_inputs(node, [node.input[0], weights_new_const.output[0], bias_new_const.output[0]])

        # fuse conv and bn, delete bn
        node2_output = node2.output[:1]
        node2_shape = g.get_shape(node2.output[0])
        node2_dtype = g.get_dtype(node2.output[0])
        g.remove_node(node2.name)
        # the setter makes a copy
        node.output = node2_output
        g.set_shape(node2_output[0], node2_shape)
        g.set_dtype(node2_output[0], node2_dtype)
        return []
|
lib/python3.10/site-packages/tf2onnx/optimizer/const_dequantize_optimizer.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""const dequantize Optimizer.
|
| 5 |
+
if a dequantize op's inputs are const we may be able to fold it through the next op
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from .optimizer_base import GraphOptimizerBase
|
| 9 |
+
from .const_fold_optimizer import ConstFoldOptimizer
|
| 10 |
+
|
| 11 |
+
# pylint: disable=logging-not-lazy,unused-argument,missing-docstring
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class ConstDequantizeOptimizer(GraphOptimizerBase):
    """Fold Transpose/Reshape/Unsqueeze through a DequantizeLinear whose
    inputs are constants, so the shape op is evaluated on the quantized
    constant at conversion time instead of at runtime."""

    def __init__(self):  # pylint: disable=useless-super-delegation
        super(ConstDequantizeOptimizer, self).__init__()

    def _optimize(self, graph):
        # Apply the per-graph pass to the main graph and its subgraphs.
        return self._apply_optimization(graph, self._optimize_at_current_graph_level)

    def _optimize_at_current_graph_level(self, graph):
        # Keep sweeping until a full pass changes nothing.
        graph_changed = True
        while graph_changed:
            graph_changed = False
            ops = graph.get_nodes()
            for op in ops:
                if self._fold_node(op, graph):
                    graph_changed = True
                    self.graph_been_opt = True
        return graph

    def _fold_node(self, node, graph):
        """ if a dequantize op's inputs are const and it is fed into a tensor reshaping op, we can apply the op
        directly to the quantized inputs. Returns True if the graph is changed.
        """
        if node.type not in ["Transpose", "Reshape", "Unsqueeze"]:
            return False
        dequant_node = node.inputs[0]
        if dequant_node.type != "DequantizeLinear":
            return False
        # The dequantize output must feed only this node.
        if len(graph.find_output_consumers(dequant_node.output[0])) > 1:
            return False
        if not self._all_inputs_are_const(node.inputs[1:]) or self._is_graph_output(node, graph):
            return False
        if not self._all_inputs_are_const(dequant_node.inputs):
            return False
        if len(dequant_node.inputs[1].get_tensor_value(as_list=False).flatten()) != 1:
            # If using per-channel quantization, we must compute the new axis
            old_axis = dequant_node.get_attr_value("axis")
            input_shape = dequant_node.inputs[0].get_tensor_value(as_list=False).shape
            new_axis = self.compute_new_axis(node, graph, old_axis, input_shape)
            if new_axis is None:
                return False
            dequant_node.set_attr("axis", new_axis)
        # Rewire: the shape op now consumes the quantized constant directly ...
        graph.replace_input(node, node.input[0], dequant_node.input[0], 0)
        # ... fold it on that constant ...
        const_outputs = ConstFoldOptimizer.compute_const_folding(node, graph)
        # ... and route the shape op's consumers to the dequantize output.
        graph.replace_all_inputs(node.output[0], dequant_node.output[0])
        graph.remove_node(node.name)
        dequant_const = dequant_node.inputs[0]
        # Copy the constant if it is shared with other consumers before
        # overwriting its value with the folded result.
        if len(graph.find_output_consumers(dequant_const.output[0])) > 1:
            dequant_const = graph.copy_const(dequant_const)
            graph.replace_input(dequant_node, dequant_node.input[0], dequant_const.output[0], 0)
        dequant_const.set_tensor_value(const_outputs[0])
        return True

    @staticmethod
    def _all_inputs_are_const(nodes):
        # `if node` skips optional (missing) inputs.
        return all(node.is_const() for node in nodes if node)

    @staticmethod
    def _is_graph_output(node, graph):
        # Returns the (possibly empty) intersection set; truthy when the
        # node produces one of the graph outputs.
        node_out_set = set(node.output)
        graph_out_set = set(graph.outputs)
        return node_out_set.intersection(graph_out_set)

    @staticmethod
    def compute_new_axis(node, graph, old_axis, input_shape):
        """Map the per-channel quantization axis through *node*.

        Returns the axis index after the shape op, or None when it cannot
        be determined (folding is then skipped by the caller).
        """
        if old_axis < 0:
            old_axis += len(input_shape)
        if node.type == "Transpose":
            perm = node.get_attr_value("perm")
            if perm is None:
                return None
            return perm.index(old_axis)
        if node.type == "Reshape":
            # The axis survives the reshape when the element count up to and
            # including it is preserved and the dimension itself is unchanged.
            prod = 1
            for d in input_shape[:old_axis+1]:
                prod *= d
            new_shape = node.inputs[1].get_tensor_value(as_list=True)
            new_prod = 1
            for i, d in enumerate(new_shape):
                new_prod *= d
                if new_prod == prod:
                    if new_shape[i] == input_shape[old_axis]:
                        return i
                    return None
            return None
        if node.type == "Unsqueeze":
            if graph.opset >= 13:
                # axes moved from attribute to input in opset 13
                axes = node.inputs[1].get_tensor_value(as_list=True)
            else:
                axes = node.get_attr_value("axes")
            new_rank = len(input_shape) + len(axes)
            axes = [axis if axis >= 0 else axis + new_rank for axis in axes]
            # Walk the output rank, skipping inserted axes, until we reach
            # the position corresponding to the original axis.
            for i in range(new_rank):
                if i not in axes:
                    if old_axis == 0:
                        return i
                    old_axis -= 1
            return None
        return None
|
lib/python3.10/site-packages/tf2onnx/optimizer/const_fold_optimizer.py
ADDED
|
@@ -0,0 +1,155 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""const fold Optimizer.
|
| 5 |
+
if op's inputs are all const then do op computation when building the graph to improve performance
|
| 6 |
+
for example, input of transpose node is const then we can do transpose statically instead of at runtime
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from .. import utils
|
| 10 |
+
from .optimizer_base import GraphOptimizerBase
|
| 11 |
+
|
| 12 |
+
# pylint: disable=logging-not-lazy,unused-argument,missing-docstring
|
| 13 |
+
|
| 14 |
+
# key is op_type, value is the function to compute outputs
|
| 15 |
+
# the schema of function is: inputs are(node, graph), output is a list of constant values.
|
| 16 |
+
_func_map = {}
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def _register_func(op_type):
|
| 20 |
+
def _internal_fun(func):
|
| 21 |
+
_func_map[op_type] = func
|
| 22 |
+
return func
|
| 23 |
+
|
| 24 |
+
return _internal_fun
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class ConstFoldOptimizer(GraphOptimizerBase):
    """Fold ops whose inputs are all constants into precomputed Const nodes.

    Handlers for individual op types are registered in the module-level
    ``_func_map`` via the ``_register_func`` decorator.
    """

    def __init__(self):  # pylint: disable=useless-super-delegation
        super(ConstFoldOptimizer, self).__init__()

    def _optimize(self, graph):
        """Apply the folding pass to the graph and all of its subgraphs."""
        return self._apply_optimization(graph, self._optimize_at_current_graph_level)

    def _optimize_at_current_graph_level(self, graph):
        """Repeatedly fold nodes until a full pass makes no change.

        Folding one node may turn another node's inputs all-constant, so the
        pass loops until a fixpoint is reached.
        """
        graph_changed = True
        while graph_changed:
            graph_changed = False
            ops = graph.get_nodes()
            for op in ops:
                if self._should_skip(op):
                    continue
                if self._fold_node(op, graph):
                    graph_changed = True
                    self.graph_been_opt = True
        return graph

    @staticmethod
    def _should_skip(node):
        """Return True for nodes this optimizer must not touch."""
        # only support onnx official op for now, op in other domain is not supported for now
        if not utils.is_onnx_domain(node.domain):
            return True

        # consts are already folded; graph inputs must stay as-is
        if node.is_const() or node.is_graph_input():
            return True

        skip_type = ["Identity", "DequantizeLinear"]
        if node.type in skip_type:
            return True

        return False

    def _fold_node(self, node, graph):
        """ if node's input are all const and it's not graph's output then it can be fold.
            if node can be fold True will be return indicating that graph is changed
        """
        if self._all_inputs_are_const(node.inputs) and not self._is_graph_output(node, graph):
            process_func = _func_map.get(node.type, None)
            if process_func:
                const_outputs = process_func(node, graph)
                self._replace_node_with_const(node, graph, const_outputs)
                return True
            # op has all-const inputs but no registered handler yet
            self.logger.debug("need to add function to fold op %s whose op_type is %s", node.name, node.type)
        return False

    @staticmethod
    def compute_const_folding(node, graph):
        """Run the registered fold handler for the node; raises KeyError if none exists."""
        return _func_map[node.type](node, graph)

    @staticmethod
    def _all_inputs_are_const(nodes):
        """True when every (non-None) input node is a constant."""
        return all(node.is_const() for node in nodes if node)

    @staticmethod
    def _is_graph_output(node, graph):
        """Return the (possibly empty) set of node outputs that are also graph outputs.

        Used in a boolean context: a non-empty set means the node feeds a
        graph output and must not be folded away.
        """
        node_out_set = set(node.output)
        graph_out_set = set(graph.outputs)
        return node_out_set.intersection(graph_out_set)

    @staticmethod
    def _replace_node_with_const(node, graph, vals):
        """Replace each output of `node` with a new Const holding the folded value."""
        utils.make_sure(len(node.output) == len(vals), "length of node outputs and const vals should be same")
        for old_input, val in zip(node.output, vals):
            const_node = graph.make_const(utils.make_name("const_fold_opt"), val)
            graph.set_dtype(const_node.output[0], utils.map_numpy_to_onnx_dtype(val.dtype))
            graph.set_shape(const_node.output[0], val.shape)
            graph.replace_all_inputs(old_input, const_node.output[0])  # ops=graph.get_nodes()
        graph.remove_node(node.name)

    @staticmethod
    @_register_func("Cast")
    def _fold_cast(node, graph):
        """Fold Cast: convert the constant tensor to the dtype named by the 'to' attr."""
        const_val = node.inputs[0].get_tensor_value(as_list=False)
        np_dtype = utils.ONNX_TO_NUMPY_DTYPE[node.get_attr("to").i]
        const_val_after_cast = const_val.astype(np_dtype)
        return [const_val_after_cast]

    @staticmethod
    @_register_func("Transpose")
    def _fold_transpose(node, graph) -> list:
        """Fold Transpose: permute the constant with the 'perm' attr (None = reverse axes)."""
        const_val = node.inputs[0].get_tensor_value(as_list=False)
        perm_attr = node.get_attr("perm")
        perm = perm_attr.ints if perm_attr else None
        const_val_after_trans = const_val.transpose(perm)
        return [const_val_after_trans]

    @staticmethod
    @_register_func("Reshape")
    def _fold_reshape(node, graph):
        """Fold Reshape: reshape the constant data to the constant target shape."""
        const_val_data = node.inputs[0].get_tensor_value(as_list=False)
        const_val_shape = node.inputs[1].get_tensor_value(as_list=True)
        data_shape = const_val_data.shape
        for i, dim in enumerate(const_val_shape):
            if dim == 0:
                # In ORT a dim of 0 means the shape stays the same.
                const_val_shape[i] = data_shape[i]
        const_val_after_trans = const_val_data.reshape(const_val_shape)
        return [const_val_after_trans]

    @staticmethod
    @_register_func("Unsqueeze")
    def _fold_unsqueeze(node, graph):
        """
        numpy expand_dims only supports to unsqueeze one dim one time, so reshape is used to simplify the logic
        """
        const_val = node.inputs[0].get_tensor_value(as_list=False)
        # opset >= 13 moved 'axes' from an attribute to a second input
        if graph.opset >= 13:
            axes = node.inputs[1].get_tensor_value(as_list=True)
        else:
            axes = list(node.get_attr("axes").ints)
        shape_in = const_val.shape
        dims_out = len(shape_in) + len(axes)
        # normalize negative axes against the output rank
        axes = [i if i >= 0 else i + dims_out for i in axes]
        # calculate the shape of output according to onnx Unsqueeze's spec
        # https://github.com/onnx/onnx/blob/master/docs/Operators.md#Unsqueeze
        shape_in = iter(shape_in)
        shape_out = [None] * dims_out
        for ind in axes:
            shape_out[ind] = 1
        # remaining slots take the input dims in order
        for ind, val in enumerate(shape_out):
            if val is None:
                shape_out[ind] = next(shape_in)

        const_val_after_unsqueeze = const_val.reshape(shape_out)
        return [const_val_after_unsqueeze]
|
lib/python3.10/site-packages/tf2onnx/optimizer/identity_optimizer.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""Identity Optimizer.
|
| 5 |
+
Remove useless Identity node in graphs including subgraphs, but does not hurt model output names.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import unicode_literals
|
| 9 |
+
|
| 10 |
+
from .optimizer_base import GraphOptimizerBase
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# pylint: disable=logging-not-lazy,unused-argument,missing-docstring,unused-variable,arguments-differ
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class IdentityOptimizer(GraphOptimizerBase):
    """Remove useless Identity nodes without changing the graph's output names."""

    def __init__(self):  # pylint: disable=useless-super-delegation
        super(IdentityOptimizer, self).__init__()

    def _optimize(self, graph):
        """Apply the identity-removal pass to the graph and all of its subgraphs."""
        return self._apply_optimization(graph, self._optimize_at_current_graph_level)

    def _optimize_at_current_graph_level(self, g):
        """Remove Identity nodes until a full pass makes no change."""
        has_update = True
        while has_update:
            has_update = False
            nodes = [n for n in g.get_nodes() if n.type == "Identity"]
            for n in nodes:
                # a previous removal in this pass may have detached the node
                if n.graph is None:
                    self.logger.debug("node has been removed from this graph, skip")
                    continue

                graph_outputs = set(n.output).intersection(g.outputs)
                ret = False
                if graph_outputs:
                    # identity feeds a graph output: must preserve the output name
                    ret = self._handle_graph_output_identity(g, n, graph_outputs)
                else:
                    ret = self._handle_non_graph_output_identity(g, n)
                has_update = ret
                if ret:
                    self.graph_been_opt = True
        return g

    @staticmethod
    def _handle_non_graph_output_identity(graph, identity):
        """Bypass the identity: rewire all consumers to its input, then delete it."""
        old_name = identity.output[0]
        new_name = identity.input[0]
        graph.replace_all_inputs(old_name, new_name, ops=graph.get_nodes())
        graph.remove_node(identity.name)
        return True

    def _handle_graph_output_identity(self, graph, identity, graph_outputs):
        """Remove an identity that produces a graph output.

        The producer node is renamed to emit the identity's output name so the
        graph's output names are preserved. Returns False when removal is not
        safe in this configuration.
        """
        input_id = identity.input[0]
        input_node = identity.inputs[0]

        if input_node.graph != graph:
            # If input node is in parent graph, we don't handle it now
            self.logger.debug("input node in parent graph, skip")
            return False

        if input_node.is_graph_input():
            # Identity between input and output should not be removed.
            self.logger.debug("skip identity between input and output")
            return False

        output_id = identity.output[0]
        output_shape = graph.get_shape(output_id)
        output_dtype = graph.get_dtype(output_id)
        if input_id in graph.outputs:
            # input id already be graph output, so we cannot make that be another graph output.
            # this Identity must be kept.
            self.logger.debug("identity input already be graph output")
            return False

        graph.remove_node(identity.name)
        # rename the producer's matching output to the identity's output name
        new_output = [output_id if o == input_id else o for o in input_node.output]
        input_node.output = new_output

        # carry shape/dtype metadata over to the (renamed) output
        graph.set_shape(output_id, output_shape)
        graph.set_dtype(output_id, output_dtype)

        graph.replace_all_inputs(input_id, output_id, ops=graph.get_nodes())
        return True
|
lib/python3.10/site-packages/tf2onnx/optimizer/loop_optimizer.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""Loop Optimizer.
|
| 5 |
+
some op in loop's body graph can be moved out to the loop
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from tf2onnx.utils import make_name, make_sure
|
| 9 |
+
from .optimizer_base import GraphOptimizerBase
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# pylint: disable=logging-not-lazy,unused-argument,missing-docstring,unused-variable,arguments-differ
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class LoopOptimizer(GraphOptimizerBase):
    """Move ops out of a Loop's body graph into the parent graph when possible."""

    # a lot of terms used here come from loop's onnx spec
    # https://github.com/onnx/onnx/blob/master/docs/Operators.md#Loop
    def __init__(self):  # pylint: disable=useless-super-delegation
        super(LoopOptimizer, self).__init__()

    def _optimize(self, graph):
        """Apply the loop pass to the graph and all of its subgraphs."""
        return self._apply_optimization(graph, self._optimize_at_current_graph_level)

    def _optimize_at_current_graph_level(self, g):
        """Process every Loop node until a full pass makes no change."""
        has_update = True
        while has_update:
            has_update = False
            nodes = [n for n in g.get_nodes() if n.type == "Loop"]
            for n in nodes:
                has_update_tmp = self._try_move_transpose_out_of_body_graph(n)
                if has_update_tmp:
                    has_update = True
                    self.graph_been_opt = True
        return g

    @staticmethod
    def consumer_nodes_num(graph, node):
        """Count the consumers of the node's single output."""
        make_sure(len(node.output) == 1, "only consider node with only one output")
        res = len(graph.find_output_consumers(node.output[0]))
        return res

    def _try_move_transpose_out_of_body_graph(self, loop_node):
        # output node of body graph can be loop-carried-dependent, if so it can't be moved out of the body graph
        # return True if moving some nodes successfully
        # for now, we only consider moving transpose
        body_graph = loop_node.get_body_graphs()["body"]
        parent_graph = loop_node.graph
        scan_nodes_name_in_body, scan_node_in_parent = self._scan_outputs(loop_node)
        scan_nodes = [body_graph.get_node_by_output(name) for name in scan_nodes_name_in_body]
        graph_is_changed = False
        for node, name_in_parent in zip(scan_nodes, scan_node_in_parent):
            # 1 delete node in body graph if possible
            # only consider two cases: trans is output, or transpose > identity > output
            need_process = False
            if node.type == "Transpose" and self.consumer_nodes_num(body_graph, node) <= 1:
                trans = node
                new_output = node.input[0]
                body_graph.remove_node(node.name)
                need_process = True
            elif node.type == "Identity" and node.inputs[0].type == "Transpose" \
                    and self.consumer_nodes_num(body_graph, node) <= 1\
                    and self.consumer_nodes_num(body_graph, node.inputs[0]) <= 1:
                trans = node.inputs[0]
                new_output = node.inputs[0].input[0]
                body_graph.remove_node(node.inputs[0].name)
                body_graph.remove_node(node.name)
                need_process = True

            if need_process:
                # 2 correct body graph's output
                body_outputs = body_graph.outputs
                body_outputs[body_outputs.index(node.output[0])] = new_output
                # 3 insert new node in parent graph
                ori_perm = list(trans.get_attr("perm").ints)
                new_perm = [0] + [i + 1 for i in ori_perm]  # body output's rank is m > rank of loop's output is m+1
                name = make_name("trans_moved_from_loop_body")
                _ = parent_graph.insert_new_node_on_output("Transpose", name_in_parent, name, perm=new_perm)
                graph_is_changed = True

        return graph_is_changed

    @classmethod
    def _scan_outputs(cls, loop):
        """Return (scan output names in the body graph, scan output names on the Loop node)."""
        # loop has 2+N inputs; loop has N+K outputs;
        # loop's body graph has 1+N+K outputs
        loop_carried = len(loop.input) - 2
        body_graph = loop.get_body_graphs()["body"]
        return body_graph.outputs[loop_carried + 1:], loop.output[loop_carried:]
|
lib/python3.10/site-packages/tf2onnx/optimizer/merge_duplicated_nodes_optimizer.py
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""Merge Duplicated Nodes Optimizer.
|
| 5 |
+
Remove duplicate nodes except identity nodes which should be handled by identity optimizer.
|
| 6 |
+
for example, node a is input of node b and node c, and computation of node b, c are same such as "abs" op.
|
| 7 |
+
then b and c can be merged into one node to avoid duplicated computation
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
from collections import defaultdict
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
|
| 14 |
+
from .optimizer_base import GraphOptimizerBase
|
| 15 |
+
|
| 16 |
+
# pylint: disable=logging-not-lazy,unused-argument,missing-docstring
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class MergeDuplicatedNodesOptimizer(GraphOptimizerBase):
    """Remove duplicate nodes.

    Nodes are duplicates when their op type, inputs, and attributes all
    match; duplicates are merged into one to avoid repeated computation.
    Identity nodes are left to the identity optimizer.
    """

    def __init__(self):
        super(MergeDuplicatedNodesOptimizer, self).__init__()
        # used internally: set when a merge happened, triggering another pass
        self._graph_can_be_optimized = True

    def _optimize(self, graph):
        """Apply the merge pass to the graph and all of its subgraphs."""
        return self._apply_optimization(graph, self._optimize_at_current_graph_level)

    def _optimize_at_current_graph_level(self, graph):
        """Merge duplicates until a full pass makes no change."""
        while self._graph_can_be_optimized:
            self._graph_can_be_optimized = False
            self._merge_duplicated_nodes(graph)
            if self._graph_can_be_optimized:
                self.graph_been_opt = True
        return graph

    def _merge_duplicated_nodes(self, graph):
        # "duplicated" means: op_type, input and attribute are same
        # while attr is un-hashable so doesn't include it when grouping nodes
        # we do hash the tensor data of const values
        nodes_groups = self._group_nodes_by_type_inputs(graph)
        for _, nodes_group in nodes_groups.items():
            if self._skip_node_type(nodes_group[0]):
                continue
            self._del_nodes_if_duplicated(nodes_group, graph)

    @staticmethod
    def _group_nodes_by_type_inputs(graph):
        """Group nodes by (op type, input names, const-data hash) as merge candidates."""
        res = defaultdict(list)
        for node in graph.get_nodes():
            # default const of graph input cannot be merged
            if node.is_graph_input_default_const():
                continue
            tensor_data_hash = None
            if node.is_const():
                # Many constants have the same size so this is helpful
                tensor_data_hash = hash(node.attr['value'].t.raw_data)
            res[(node.type, tuple(node.input), tensor_data_hash)].append(node)
        return res

    def _del_nodes_if_duplicated(self, nodes_group, graph):
        # input and op type of nodes in same group are same,
        # and if their attributes are also same then they are duplicated
        while len(nodes_group) > 1:
            unprocessed_node = []
            nodes_to_process = [nodes_group[0]]
            for node in nodes_group[1:]:
                if self._have_equal_attr(node, nodes_to_process[0], graph):
                    nodes_to_process.append(node)
                else:
                    # not equal to the current pivot; retry in the next round
                    unprocessed_node.append(node)

            self._merge_nodes_that_are_duplicated(nodes_to_process, graph)
            nodes_group = unprocessed_node

    def _have_equal_attr(self, node_1, node_2, graph):
        """True when the two nodes' attributes (or const tensor values) are equal."""
        if node_1.attr == node_2.attr:
            return True
        # consts have a name attr that can differ among equal consts so they must be handled separately
        if node_1.is_const() and node_2.is_const():
            # get_tensor_value is costly so that we check their shape first
            shape_1 = graph.get_shape(node_1.output[0])
            shape_2 = graph.get_shape(node_2.output[0])
            if shape_1 is not None and shape_2 is not None and \
                    shape_1 != shape_2:
                return False
            const_1 = node_1.get_tensor_value(as_list=False)
            const_2 = node_2.get_tensor_value(as_list=False)
            if const_1.dtype == const_2.dtype and \
                    np.array_equal(const_1, const_2):
                return True
        return False

    def _merge_nodes_that_are_duplicated(self, nodes_to_process, graph):
        # node's output may not all be used, so have to select the one that uses most of node's outputs
        nodes_to_process.sort(key=self._len_of_node_output, reverse=True)
        node_to_retain = nodes_to_process[0]
        for node_to_delete in nodes_to_process[1:]:
            # if one of the output is graph's output then it can't be deleted
            if set(node_to_delete.output).intersection(set(graph.outputs)):
                continue
            for old_input, new_input in zip(node_to_delete.output, node_to_retain.output):
                graph.replace_all_inputs(old_input, new_input)
            graph.remove_node(node_to_delete.name)
            self._graph_can_be_optimized = True

    @staticmethod
    def _skip_node_type(node):
        """True for node kinds this optimizer should leave alone."""
        # identity node will be handled by identity optimizer so skip it
        if node.type in ["Identity"]:
            return True
        if node.is_graph_input():
            return True
        return False

    @staticmethod
    def _len_of_node_output(node):
        """Sort key: number of outputs a node has."""
        return len(node.output)
|
lib/python3.10/site-packages/tf2onnx/optimizer/optimizer_base.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""Graph Optimizer Base"""
|
| 5 |
+
|
| 6 |
+
from __future__ import unicode_literals
|
| 7 |
+
|
| 8 |
+
import copy
|
| 9 |
+
|
| 10 |
+
from .. import logging, utils
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class GraphOptimizerBase(object):
    """Base class for graph optimizers.

    Subclasses implement ``_optimize``; ``optimize`` wraps it with proto
    refresh, dead-node cleanup, and before/after statistics logging.
    """

    def __init__(self):
        # logger named after the package plus the concrete subclass
        self._logger = logging.getLogger('.'.join(__name__.split('.')[:-1] + [self.__class__.__name__]))
        self._graph_been_opt = False

    @property
    def logger(self):
        return self._logger

    @property
    def is_debug_mode(self):
        return utils.is_debug_mode()

    @property
    def graph_been_opt(self):
        # True once the concrete optimizer actually changed the graph
        return self._graph_been_opt

    @graph_been_opt.setter
    def graph_been_opt(self, value):
        self._graph_been_opt = value

    def optimize(self, graph):
        """ Optimize graph, return optimized graph. """
        before = graph.dump_node_statistics()

        graph = self._optimize(graph)
        graph.update_proto()
        graph.delete_unused_nodes(graph.outputs)

        after = graph.dump_node_statistics()
        self._print_stat_diff(before, after)
        return graph

    def _optimize(self, graph):
        """ Derived class should override this function. """
        raise NotImplementedError

    @staticmethod
    def _apply_optimization(graph, optimize_func):
        """
        optimize graph
        will also optimize graph of nodes'
        Args:
            graph: the top level graph to be optimized
            optimize_func: function to optimize graph
        """
        graph = optimize_func(graph)
        # recurse into every node's body graphs (e.g. Loop/If/Scan subgraphs)
        for node in graph.get_nodes():
            body_graphs = node.get_body_graphs()
            if body_graphs:
                for attr, b_g in body_graphs.items():
                    b_g = GraphOptimizerBase._apply_optimization(b_g, optimize_func)
                    node.set_body_graph_as_attr(attr, b_g)
        return graph

    def _print_stat_diff(self, before, after):
        """Log the per-op-type node count change between two statistics counters."""
        diff = copy.deepcopy(after)
        diff.subtract(before)
        diff = ["{} {} ({}->{})".format(k, str(v) if v < 0 else '+' + str(v), before.get(k, 0), after.get(k, 0))
                for k, v in sorted(diff.items()) if v != 0]
        self.logger.verbose(', '.join(diff) if diff else "no change")
|
lib/python3.10/site-packages/tf2onnx/optimizer/transpose_optimizer.py
ADDED
|
@@ -0,0 +1,829 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""Transpose Optimizer."""
|
| 5 |
+
|
| 6 |
+
from __future__ import unicode_literals
|
| 7 |
+
from collections import defaultdict
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
import onnx
|
| 11 |
+
from tf2onnx.constants import NCHW_TO_NHWC, NHWC_TO_NCHW, NCDHW_TO_NDHWC, NDHWC_TO_NCDHW
|
| 12 |
+
from .. import utils
|
| 13 |
+
from .optimizer_base import GraphOptimizerBase
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# pylint: disable=logging-not-lazy,unused-argument,missing-docstring,abstract-method
|
| 17 |
+
# FIXME:
|
| 18 |
+
# pylint: disable=unused-variable
|
| 19 |
+
|
| 20 |
+
def is_nhwc_transpose(transpose_node):
    """Truthy iff the node is a Transpose whose perm maps channels-first to channels-last."""
    perm_attr = transpose_node.get_attr('perm')
    if transpose_node.type != "Transpose":
        return False
    return perm_attr and perm_attr.ints in [NCHW_TO_NHWC, NCDHW_TO_NDHWC]
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def is_nchw_transpose(transpose_node):
    """Truthy iff the node is a Transpose whose perm maps channels-last to channels-first."""
    perm_attr = transpose_node.get_attr('perm')
    if transpose_node.type != "Transpose":
        return False
    return perm_attr and perm_attr.ints in [NHWC_TO_NCHW, NDHWC_TO_NCDHW]
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def is_useless_transpose(transpose_node):
    """Truthy iff the node is a Transpose whose perm is the identity permutation."""
    perm_attr = transpose_node.get_attr('perm')
    if transpose_node.type != "Transpose":
        return False
    if not perm_attr:
        return perm_attr
    return perm_attr.ints == list(range(len(perm_attr.ints)))
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def get_transpose_rank(trans):
    """Return the rank the Transpose operates on (the length of its perm attr)."""
    perm = trans.get_attr('perm')
    return len(perm.ints)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class TransposeOptimizer(GraphOptimizerBase):
|
| 40 |
+
"""Transpose Optimizer."""
|
| 41 |
+
|
| 42 |
+
    def __init__(self):
        """Initialize handler registry and per-run state."""
        super(TransposeOptimizer, self).__init__()

        # per-op-type handlers; _initialize_handlers populates this —
        # presumably, its definition is outside this view; confirm there
        self._handler_map = {}
        self._force_stop = {}

        self._initialize_handlers()
        # graph under optimization and its output node names; None between runs
        self._g = None
        self._output_names = None
|
| 51 |
+
|
| 52 |
+
    @property
    def nodes(self):
        """Node list of the graph currently being optimized."""
        return self._g.get_nodes()
|
| 55 |
+
|
| 56 |
+
    def pre_optimize_action(self):
        """Constant-fold Reshape ops whose data and target shape are both const.

        The folded const can later be fused into Conv weights
        (e.g. for mobilenet_v1_75_192). Also records graph output node names.
        """
        # make Reshape into a const, which then can be fused into Conv's weight for mobilenet_v1_75_192
        self._output_names = [self._g.get_node_by_output(out).name for out in self._g.outputs]
        ops = self.nodes
        constable_reshape_ops = [n for n in ops
                                 if (n.type == "Reshape"
                                     and n.inputs[0].is_const()
                                     and n.inputs[1].is_const())]
        for reshape_op in constable_reshape_ops:
            target_t = reshape_op.inputs[0].get_tensor_value(as_list=False)
            target_shape = reshape_op.inputs[1].get_tensor_value(as_list=True)
            for i, dim in enumerate(target_shape):
                if dim == 0:
                    # In ORT a dim of 0 means the shape stays the same.
                    target_shape[i] = target_t.shape[i]
            new_data = np.reshape(target_t, target_shape)
            # reuse the Reshape's output name so downstream references stay valid
            const_name = reshape_op.output[0]
            self._g.remove_node(reshape_op.name)
            self._g.make_const(const_name, new_data)

            # point all children nodes inputs to the new node
            for output_name in reshape_op.output:
                for child in ops:
                    for i, name in enumerate(child.input):
                        if name == output_name:
                            child.input[i] = const_name

        self._g.topological_sort(self._g.get_nodes())
|
| 84 |
+
|
| 85 |
+
    def post_optimize_action(self):
        """Replace transposes that cannot reorder any data with cheap Reshapes.

        A transpose where the moved axes are all of size 1 does not change the
        memory layout, so a Reshape (which is copy-free in most runtimes) suffices.
        """
        def _calculate_new_shape(graph, op):
            # Returns the name of a tensor holding the post-transpose shape of `op`.
            input_shape = graph.get_shape(op.input[0])
            if input_shape.count(-1) <= 1:
                if is_nchw_transpose(op):
                    # channels-last -> channels-first: move the last dim to position 1
                    new_shape = [input_shape[0], input_shape[-1]] + input_shape[1:-1]
                else:
                    # channels-first -> channels-last: move dim 1 to the end
                    new_shape = [input_shape[0]] + input_shape[2:] + [input_shape[1]]
                return graph.make_const(utils.make_name("new_shape"), np.array(new_shape, dtype=np.int64)).output[0]

            # reshape requires that the output shape can only contain one -1; if not, some extra op is needed:
            # compute the shape at runtime by gathering the input shape through the perm.
            input_shape = graph.make_node("Shape", [op.input[0]]).output[0]
            indice = graph.make_const(utils.make_name("indice"), np.array(op.get_attr('perm').ints)).output[0]

            return graph.make_node("Gather", [input_shape, indice]).output[0]

        nodes = self.nodes
        # if channel==1 or height==width==1, replace transpose with reshape
        # replacing trans with reshape is because transpose will copy data even if the transpose does nothing
        need_sort = False
        for op in nodes:
            if op.type == "Transpose":
                input_shape = self._g.get_shape(op.input[0])
                if not input_shape:
                    continue

                if (is_nchw_transpose(op) and (input_shape[-1] == 1 or (np.all(np.array(input_shape[1:-1]) == 1)))) \
                        or (is_nhwc_transpose(op) and (input_shape[1] == 1 or (np.all(np.array(input_shape[2:]) == 1)))):
                    new_shape = _calculate_new_shape(self._g, op)
                    # replace transpose with reshape (same node name and outputs)
                    self._g.remove_node(op.name)
                    self._g.make_node("Reshape", [op.input[0], new_shape], name=op.name, outputs=op.output)
                    need_sort = True
        if need_sort:
            self._g.topological_sort(self._g.get_nodes())
|
| 120 |
+
|
| 121 |
+
    def merge_duplicated_transposes(self):
        """Merge Transpose nodes that share the same input tensor and perm into one."""
        # strategy used in previous procedure is to move transpose nodes down if possible,
        # and it means that when a node has n outputs then n transpose will be generated,
        # so we should merge them back to one if they can't be eliminated in previous procedure.
        graph = self._g
        input_transposes_map = defaultdict(list)
        for node in graph.get_nodes():
            if node.type == "Transpose" and node.get_attr("perm"):
                # two transposes are duplicates iff they read the same tensor with the same perm
                key = (node.input[0], str(node.get_attr("perm").ints))
                input_transposes_map[key].append(node)

        for transposes in input_transposes_map.values():
            # merge transpose nodes into one: make nodes use the output of the first transpose node
            transpose_out = transposes[0].output[0]
            for node in transposes[1:]:
                old_transpose_out = node.output[0]
                graph.replace_all_inputs(old_transpose_out, transpose_out)  # ops=graph.get_nodes()

        # dangling transpose nodes can be deleted
        graph.delete_unused_nodes(graph.outputs)
|
| 141 |
+
|
| 142 |
+
    def _optimize(self, graph):
        """Entry point: apply the transpose optimization to graph (and its subgraphs)."""
        return self._apply_optimization(graph, self._optimize_at_current_graph_level)
|
| 144 |
+
|
| 145 |
+
    def _optimize_at_current_graph_level(self, graph):
        """Push NHWC transposes down and drop identity transposes until a fixpoint.

        Each successful rewrite restarts the node scan, because handlers mutate the
        node set and iterating further could touch stale/deleted nodes.
        """
        self._g = graph
        self.pre_optimize_action()
        no_action = False
        iteration_cnt = 0
        while not no_action:
            no_action = True
            nodes = self.nodes
            self._force_stop = {}
            for n in nodes:
                if is_nhwc_transpose(n):
                    if self._handle_nhwc_tranpose(n):
                        no_action = False
                        self.graph_been_opt = True
                        iteration_cnt += 1
                        # need break, because handler may change nodes set, making the n stale object
                        # referencing already deleted elements
                        break

                if is_useless_transpose(n):
                    no_action = False
                    iteration_cnt += 1
                    self._remove_useless_tranpose(n)
                    break
            # for debugging purpose
            if "stop" in self._force_stop and self._force_stop["stop"] == 1:
                break

        self.logger.debug("finish after " + str(iteration_cnt) + " iteration(s)")

        self.merge_duplicated_transposes()
        self.post_optimize_action()
        return self._g
|
| 178 |
+
|
| 179 |
+
    def _initialize_handlers(self):
        """Build the op-type -> handler dispatch table used by _handle_nhwc_tranpose."""
        self._handler_map = {
            "Add": self._add_handler,
            "ArgMax": self._arg_min_max_handler,
            "ArgMin": self._arg_min_max_handler,
            "Cast": self._simple_through_handler,
            "Clip": self._simple_through_handler,
            "Concat": self._concat_handler,
            "Elu": self._simple_through_handler,
            "Exp": self._simple_through_handler,
            "Identity": self._identity_handler,
            "LeakyRelu": self._simple_through_handler,
            "Log": self._simple_through_handler,
            "Max": self._maxmin_handler,
            "Min": self._maxmin_handler,
            "Mul": self._mul_handler,
            "Pad": self._pad_handler,
            "Reciprocal": self._simple_through_handler,
            "ReduceLogSum": self._reduce_handler,
            "ReduceLogSumExp": self._reduce_handler,
            "ReduceMax": self._reduce_handler,
            "ReduceMean": self._reduce_handler,
            "ReduceMin": self._reduce_handler,
            "ReduceProd": self._reduce_handler,
            "ReduceSum": self._reducesum_handler,
            "ReduceSumSquare": self._reduce_handler,
            "Relu": self._simple_through_handler,
            "Shape": self._shape_handler,
            "Sigmoid": self._simple_through_handler,
            "Sum": self._sum_handler,
            "Slice": self._slice_handler,
            "Split": self._split_handler,
            "Softplus": self._simple_through_handler,
            "Sqrt": self._simple_through_handler,
            "Squeeze": self._squeeze_handler,
            "Sub": self._sub_handler,
            "Tanh": self._simple_through_handler,
            "Transpose": self._transpose_handler,
            "DequantizeLinear": self._quantize_handler,
            "QuantizeLinear": self._quantize_handler,
        }
|
| 220 |
+
|
| 221 |
+
    def _handle_node_having_branches(self, trans, node):
        """Push the transpose below a (possibly multi-input) node.

        Wraps non-transpose inputs in cancelling transpose pairs, then removes the
        transposes immediately above and below `node`, effectively moving the
        layout change past it. Returns True when the graph was rewritten.
        """
        trans_rank = get_transpose_rank(trans)
        # create transpose pairs if some input are not.
        if not self._create_transpose_pairs_before_node(trans_rank, node):
            return False
        # make sure node's all input transpose all have only 1 consumer node,
        # otherwise, it would impact their other output nodes
        if self._nodes_has_single_consumer_node(node.inputs) and len(node.output) == 1:
            self._create_transpose_pairs_after_node(trans_rank, node)
            input_transposes = set(node.inputs)
            for n in input_transposes:
                # bypass and delete each input transpose
                n_input = n.input[0]
                utils.make_sure(len(n.output) == 1, "only expect single output")
                self._g.replace_all_inputs(n.output[0], n_input)  # ops=self._g.get_nodes()
                self._g.remove_node(n.name)

            utils.make_sure(len(node.output) == 1, "only expect single output")
            # currently we assume node only has 1 output, for cases where it is more than 1 for example Split
            # we need consider the fact that Split's multiple output will not always has data in NCHW/NHWC,
            # it might be a different shape.
            output_transposes = self._g.find_output_consumers(node.output[0])
            for n in output_transposes:
                # bypass and delete the to-NCHW transposes inserted after the node
                n_input = n.input[0]
                utils.make_sure(len(n.output) == 1, "only expect single output")
                self._g.replace_all_inputs(n.output[0], n_input)  # ops=self._g.get_nodes()
                self._g.remove_node(n.name)

            shape = self._g.get_shape(node.output[0])
            if shape:
                # only nhwc transpose can reach here
                perm = NHWC_TO_NCHW if trans_rank == 4 else NDHWC_TO_NCDHW
                new_shape = [shape[i] for i in perm]
                self._g.set_shape(node.output[0], new_shape)
            return True

        self.logger.debug("input transpose does not have single consumer, skipping...")
        return False
|
| 258 |
+
|
| 259 |
+
# get the input index of transpose op in node's inputs.
|
| 260 |
+
def _get_input_index_for_trans(self, node, trans):
|
| 261 |
+
input_index = 0
|
| 262 |
+
for i in node.input:
|
| 263 |
+
if i == trans.output[0]:
|
| 264 |
+
break
|
| 265 |
+
input_index += 1
|
| 266 |
+
return input_index
|
| 267 |
+
|
| 268 |
+
# the assumption is: both node and trans have only 1 output
|
| 269 |
+
def _switch_transpose_and_node(self, node, trans, update_shape=True):
|
| 270 |
+
if not self._nodes_has_single_consumer_node([trans]):
|
| 271 |
+
return False
|
| 272 |
+
|
| 273 |
+
input_index = self._get_input_index_for_trans(node, trans)
|
| 274 |
+
|
| 275 |
+
self._g.replace_all_inputs(node.output[0], trans.output[0]) # ops=self._g.get_nodes()
|
| 276 |
+
self._g.replace_input(node, node.input[input_index], trans.input[0], input_index)
|
| 277 |
+
self._g.replace_input(trans, trans.input[0], node.output[0], 0)
|
| 278 |
+
|
| 279 |
+
# need to transpose node shape in backward direction as well after switch
|
| 280 |
+
# otherwise, reshape added in post_optimize_action may not work correctly
|
| 281 |
+
shape = self._g.get_shape(node.output[0])
|
| 282 |
+
if update_shape and shape:
|
| 283 |
+
# only nhwc transpose can reach here
|
| 284 |
+
new_shape = [shape[i] for i in NHWC_TO_NCHW]
|
| 285 |
+
self._g.set_shape(node.output[0], new_shape)
|
| 286 |
+
return True
|
| 287 |
+
|
| 288 |
+
    # if return value is True, then it means Transpose is handled as designed
    # otherwise, it means that we skip handling since it is not in our support set
    def _handle_nhwc_tranpose(self, trans):
        """Dispatch an NHWC transpose to its consumer's handler (or fan it out)."""
        if trans.output[0] in self._g.outputs:
            self.logger.debug("%s connects to graph outputs, skip", trans.output[0])
            return False
        out_nodes = self._g.find_output_consumers(trans.output[0])
        if len(out_nodes) == 1:
            p = out_nodes[0]
            if p.name in self._output_names:
                self.logger.debug("cannot move transpose down since it met output node %s", p.name)
                return False

            if p.type in self._handler_map:
                op_handler = self._handler_map[p.type]
                return op_handler(trans, p)
            return False
        if out_nodes:
            # move transpose into branches to let Transposes can be "handled" in each branch
            for n in out_nodes:
                branch_trans = n.graph.make_node("Transpose", [trans.input[0]], attr=trans.get_onnx_attrs())
                n.graph.replace_input(n, trans.output[0], branch_trans.output[0])
            self._g.remove_node(trans.name)
        # fan-out itself is not counted as a handled transpose
        return False
|
| 312 |
+
|
| 313 |
+
    def _remove_useless_tranpose(self, trans):
        """Bypass and delete a Transpose whose perm is the identity."""
        self._g.replace_all_inputs(trans.output[0], trans.input[0])  # ops=self._g.get_nodes()
        self._g.remove_node(trans.name)
|
| 316 |
+
|
| 317 |
+
def _nodes_has_single_consumer_node(self, nodes):
|
| 318 |
+
for n in nodes:
|
| 319 |
+
for output in n.output:
|
| 320 |
+
cnt = len(set(self._g.find_output_consumers(output)))
|
| 321 |
+
if cnt != 1:
|
| 322 |
+
return False
|
| 323 |
+
return True
|
| 324 |
+
|
| 325 |
+
def _get_non_nchw_transpose_output_nodes(self, node):
|
| 326 |
+
# we just support node having 1 output, we need consider cases where node has more than 1 outputs
|
| 327 |
+
assert len(node.output) == 1
|
| 328 |
+
non_nchw_tranpose_nodes = []
|
| 329 |
+
consumers = self._g.find_output_consumers(node.output[0])
|
| 330 |
+
for o in consumers:
|
| 331 |
+
if not is_nchw_transpose(o) and o not in non_nchw_tranpose_nodes:
|
| 332 |
+
non_nchw_tranpose_nodes.append(o)
|
| 333 |
+
return non_nchw_tranpose_nodes
|
| 334 |
+
|
| 335 |
+
def _create_transpose_pairs_after_node(self, trans_rank, node):
|
| 336 |
+
assert len(node.output) == 1 # just support node who has 1 output
|
| 337 |
+
non_nchw_trans_consumers = self._get_non_nchw_transpose_output_nodes(node)
|
| 338 |
+
# add Transpose(0, 3, 1, 2) and Transpose(0, 2, 3, 1) before each non_nchw_trans_consumers
|
| 339 |
+
for consumer in non_nchw_trans_consumers:
|
| 340 |
+
perms = (NHWC_TO_NCHW, NCHW_TO_NHWC) if trans_rank == 4 else (NDHWC_TO_NCDHW, NCDHW_TO_NDHWC)
|
| 341 |
+
nchw_node = self._g.make_node("Transpose", [node.output[0]], attr={"perm": perms[0]})
|
| 342 |
+
nhwc_node = self._g.make_node("Transpose", [nchw_node.output[0]], attr={"perm": perms[1]})
|
| 343 |
+
self._g.replace_input(consumer, node.output[0], nhwc_node.output[0])
|
| 344 |
+
|
| 345 |
+
def _create_transpose_pairs_before_node(self, trans_rank, node):
|
| 346 |
+
def shape_after_expand(ori_shape):
|
| 347 |
+
# according to broadcasting rule to expand shape to 4D while not tile the tensor here
|
| 348 |
+
# still count on the broadcasting op to tile the tensor
|
| 349 |
+
if ori_shape.count(-1) >= 2:
|
| 350 |
+
self.logger.warning("%s shape can contain one -1 at most, otherwise reshape op can't work", node.name)
|
| 351 |
+
return None
|
| 352 |
+
ori_rank = len(ori_shape)
|
| 353 |
+
new_shape = [1] * (trans_rank - ori_rank) + ori_shape
|
| 354 |
+
return new_shape
|
| 355 |
+
|
| 356 |
+
non_nhwc_trans_inputs = []
|
| 357 |
+
for input_id, n in zip(node.input, node.inputs):
|
| 358 |
+
if not is_nhwc_transpose(n):
|
| 359 |
+
# check in case node has two inputs coming from a same node output.
|
| 360 |
+
if [input_id, n] not in non_nhwc_trans_inputs:
|
| 361 |
+
non_nhwc_trans_inputs.append([input_id, n])
|
| 362 |
+
|
| 363 |
+
# add Transpose NHWC_TO_NCHW and Transpose NCHW_TO_NHWC before each non_nhwc_trans_consumers
|
| 364 |
+
shape_unknow = [input_id for input_id, _ in non_nhwc_trans_inputs if self._g.get_shape(input_id) is None]
|
| 365 |
+
if shape_unknow:
|
| 366 |
+
if self._g.opset <= 9:
|
| 367 |
+
msg = "%s 's shape is unknown, ConstantOfShape will be used which exists in version 9 or higher" \
|
| 368 |
+
"while graph's opset version is %s" % (shape_unknow, self._g.opset)
|
| 369 |
+
self.logger.warning(msg)
|
| 370 |
+
return False
|
| 371 |
+
|
| 372 |
+
for input_id, n in non_nhwc_trans_inputs:
|
| 373 |
+
shape = self._g.get_shape(input_id)
|
| 374 |
+
# if rank of n is not transpose rank, then we need to insert a reshape op before inserting a transpose
|
| 375 |
+
# for example shape of n is [x, y], then output shape of reshape will be [1, 1, x, y] or [1, 1, 1, x, y]
|
| 376 |
+
if shape is None:
|
| 377 |
+
const_4 = self._g.make_const(utils.make_name("const_4"), np.array([trans_rank], np.int64)).output[0]
|
| 378 |
+
tensor_1 = onnx.helper.make_tensor("value", onnx.TensorProto.INT64, [1], [1])
|
| 379 |
+
shape_node = self._g.make_node("Shape", [input_id]).output[0]
|
| 380 |
+
rank_node = self._g.make_node("Shape", [shape_node]).output[0]
|
| 381 |
+
expand_rank = self._g.make_node("Sub", [const_4, rank_node]).output[0]
|
| 382 |
+
array_fill_1 = self._g.make_node("ConstantOfShape", [expand_rank], attr={"value": tensor_1}).output[0]
|
| 383 |
+
new_shape = self._g.make_node("Concat", [array_fill_1, shape_node], attr={"axis": 0}).output[0]
|
| 384 |
+
reshape = self._g.make_node("Reshape", [input_id, new_shape]).output[0]
|
| 385 |
+
input_of_new_trans = reshape
|
| 386 |
+
elif len(shape) == trans_rank:
|
| 387 |
+
input_of_new_trans = input_id
|
| 388 |
+
else:
|
| 389 |
+
shape = shape_after_expand(shape)
|
| 390 |
+
if shape is None:
|
| 391 |
+
return False
|
| 392 |
+
const = self._g.make_const(utils.make_name("reshape_shape"), np.array(shape, np.int64)).output[0]
|
| 393 |
+
reshape = self._g.make_node("Reshape", [input_id, const]).output[0]
|
| 394 |
+
input_of_new_trans = reshape
|
| 395 |
+
|
| 396 |
+
perms = (NHWC_TO_NCHW, NCHW_TO_NHWC) if trans_rank == 4 else (NDHWC_TO_NCDHW, NCDHW_TO_NDHWC)
|
| 397 |
+
nchw_node = self._g.make_node("Transpose", [input_of_new_trans], attr={"perm": perms[0]})
|
| 398 |
+
nhwc_node = self._g.make_node("Transpose", [nchw_node.output[0]], attr={"perm": perms[1]})
|
| 399 |
+
self._g.replace_input(node, input_id, nhwc_node.output[0])
|
| 400 |
+
return True
|
| 401 |
+
|
| 402 |
+
    def _add_handler(self, trans, node):
        """Add: fuse a const addend into a preceding bias-less Conv/ConvTranspose,
        otherwise fall back to the generic branch handling."""
        if node.inputs[1].is_const():
            t_p = trans.inputs[0]
            if t_p.type in ("Conv", "ConvTranspose") and len(t_p.input) == 2:
                # if Conv or ConvTranspose's bias input is not set, then we set, otherwise, we don't set
                # todo: maybe we can add already set bias with the input??? try later

                if not self._nodes_has_single_consumer_node([t_p]):
                    self.logger.debug("Conv does not have single consumer, can not merge Conv and Add")
                    return self._handle_node_having_branches(trans, node)

                if not self._nodes_has_single_consumer_node([trans]):
                    self.logger.debug("input transpose does not have single consumer, skipping...")
                    return False

                target_node = node.inputs[1]
                numpy_val = target_node.get_tensor_value(as_list=False)
                # Optional 1D bias to be added to the convolution, has size of M
                if len(numpy_val.shape) - numpy_val.shape.count(1) > 1:
                    self.logger.debug("Bias is not 1D, can not merge Conv and Add")
                    return self._handle_node_having_branches(trans, node)

                bias_size = max(numpy_val.shape)
                # M = output-channel count from the conv weight's first dim
                size_m = t_p.inputs[1].output_shapes[0][0]
                if bias_size != size_m:
                    self.logger.debug("Bias size is not M, can not merge Conv and Add")
                    return self._handle_node_having_branches(trans, node)

                # flatten the addend to a 1-D bias of size M
                target_val = numpy_val.reshape(bias_size)
                target_node.set_tensor_value(target_val)

                # rebuild the conv with the bias as third input, then splice it in
                conv_inputs = [t_p.input[0], t_p.input[1], node.input[1]]
                conv_node = self._g.make_node(t_p.type, conv_inputs, attr=t_p.get_onnx_attrs())
                self._g.replace_input(trans, trans.input[0], utils.port_name(conv_node.name), 0)
                self._g.replace_all_inputs(node.output[0], trans.output[0])  # ops=self._g.get_nodes()
                self._g.remove_node(t_p.name)
                self._g.remove_node(node.name)
                return True
        return self._handle_node_having_branches(trans, node)
|
| 441 |
+
|
| 442 |
+
    def _transpose_handler(self, trans, node):
        """Transpose: cancel an NHWC transpose immediately followed by an NCHW one."""
        if is_nchw_transpose(node):
            for g in {self._g, node.graph}:
                # both transposes cancel out: consumers read the original tensor
                g.replace_all_inputs(node.output[0], trans.input[0])  # ops=g.get_nodes()

            shape = node.graph.get_shape(node.output[0])
            dtype = node.graph.get_dtype(node.output[0])
            if node.output[0] in node.graph.outputs:
                # keep the graph output name alive via an Identity
                node.graph.make_node("Identity", [trans.input[0]],
                                     outputs=node.output, shapes=[shape], dtypes=[dtype])
            self._g.remove_node(trans.name)
            node.graph.remove_node(node.name)
            return True
        return False
|
| 456 |
+
|
| 457 |
+
    def _maxmin_handler(self, trans, node):
        """Max/Min: handled by the generic multi-input branch logic."""
        return self._handle_node_having_branches(trans, node)
|
| 459 |
+
|
| 460 |
+
    def _mul_handler(self, trans, node):
        """Mul: push the transpose below, covering const / transposed / repeated operands."""
        multiplier_input_id = None
        multiplier_input_node = None
        multiplier_input_idx = None
        for idx, (input_id, input_node) in enumerate(zip(node.input, node.inputs)):
            if input_id != trans.output[0]:
                multiplier_input_id = input_id
                multiplier_input_node = input_node
                multiplier_input_idx = idx

        # node's inputs may come from one same node. if so the multiplier_input_node may be none
        if multiplier_input_node is None:
            # both inputs are the same transpose output: mul(t, t) -> t(mul(x, x))
            if not self._nodes_has_single_consumer_node([trans]):
                return False
            self._g.replace_all_inputs(node.output[0], trans.output[0])
            self._g.replace_input(node, node.input[0], trans.input[0], 0)
            self._g.replace_input(node, node.input[1], trans.input[0], 1)
            self._g.replace_input(trans, trans.input[0], node.output[0], 0)
            return True

        # convert mul(trans(x), trans(y)) -> trans(mul(x, y))
        if multiplier_input_node.type == "Transpose":
            if is_nhwc_transpose(multiplier_input_node):
                if not self._nodes_has_single_consumer_node([multiplier_input_node]):
                    return False
                input_index = self._get_input_index_for_trans(node, multiplier_input_node)
                if not self._switch_transpose_and_node(node, trans):
                    return False

                self._g.replace_input(node, node.input[input_index], multiplier_input_node.input[0], input_index)
                self._g.remove_node(multiplier_input_node.name)
                return True

        # handle const multipliers
        if not multiplier_input_node.is_const():
            return False
        multiplier = multiplier_input_node.get_tensor_value(as_list=False)

        # todo: apply this block if we have model case multiplier_input_id==0, and verify that.
        if multiplier_input_id == node.input[1]:
            t_p = trans.inputs[0]
            trans_rank = get_transpose_rank(trans)
            # make sure conv don't have bias set
            if t_p.type == "Conv" and t_p.inputs[1].is_const() and len(t_p.input) == 2 and trans_rank == 4:
                # fold the scale directly into the Conv weights, dropping the Mul
                conv = t_p
                numpy_val = conv.inputs[1].get_tensor_value(as_list=False)
                transposed_val = np.transpose(numpy_val, (2, 3, 1, 0))
                mul_val = multiplier
                result = np.multiply(transposed_val, mul_val)
                conv.inputs[1].set_tensor_value(np.transpose(result, (3, 2, 0, 1)))

                self._g.replace_all_inputs(node.output[0], trans.output[0])  # ops=self._g.get_nodes()
                self._g.remove_node(node.name)
                return True

        # if the shape is (), we just move transpose after the mul
        if not multiplier.shape:
            return self._switch_transpose_and_node(node, trans)

        # if multiplier is 1-D
        if len(multiplier.shape) == 1 and multiplier.shape[0] == 1:
            # shape is (1)
            return self._switch_transpose_and_node(node, trans)

        # if multiplier has shape (N,) or (1, N) or (1, 1, N) ....
        if np.prod(multiplier.shape) == multiplier.shape[-1]:
            if not self._nodes_has_single_consumer_node([multiplier_input_node]):
                # const is shared elsewhere: work on a private copy
                new_inp = self._g.copy_const(multiplier_input_node)
                self._g.replace_input(node, multiplier_input_id, new_inp.output[0], multiplier_input_idx)
                multiplier_input_node = new_inp
            # reshape so the N elements line up with the channel axis after the switch
            perm = list(trans.get_attr('perm').ints)
            new_shape = np.ones(len(perm), dtype=np.int32)
            new_shape[perm[-1]] = multiplier.shape[-1]
            multiplier_input_node.set_tensor_value(multiplier.reshape(new_shape))
            return self._switch_transpose_and_node(node, trans)

        return False
|
| 537 |
+
|
| 538 |
+
    def _sum_handler(self, trans, node):
        """Sum: rewrite sum(trans(x1), trans(x2), const, ...) as trans(sum(...))."""
        inputs = node.inputs
        trans_shape = self._g.get_shape(trans.output[0])
        perm = list(trans.get_attr('perm').ints)
        untrans_idx = [perm.index(i) for i in range(len(perm))]  # inverse permutation

        # check if sum(trans(x1), trans(x2), const(x3), ...) can be switched
        for n in inputs:
            if n.type not in ["Transpose", "Const"]:
                return False
            if not self._nodes_has_single_consumer_node([n]):
                return False
            if n.is_const():
                # if graph is valid, op shapes should be valid
                # const is special case, in case of broadcasting
                # ensure rank matches
                n_shape = self._g.get_shape(n.output[0])
                if len(n_shape) != len(trans_shape):
                    return False
            else:
                # every transpose input must use the same perm as trans
                if list(n.get_attr('perm').ints) != perm:
                    return False

        # switch to trans(sum(x1, x2, x3, ...))
        self._g.replace_all_inputs(node.output[0], trans.output[0])  # ops=self._g.get_nodes()
        new_input = [n.output[0] if n.is_const() else n.input[0] for n in inputs]
        self._g.replace_inputs(node, new_input)
        self._g.replace_input(trans, trans.input[0], node.output[0], 0)

        # adjust shape if present
        shape = self._g.get_shape(node.output[0])
        if shape:
            self._g.set_shape(node.output[0], [shape[i] for i in untrans_idx])

        # update constants, remove dangling transposes
        for n in inputs:
            if n.is_const():
                # pre-transpose the const so the moved Sum still sees matching layout
                val = n.get_tensor_value(as_list=False)
                new_val = np.transpose(val, untrans_idx)
                n.set_tensor_value(new_val)
            elif n.name != trans.name:
                self._g.remove_node(n.name)
        return True
|
| 581 |
+
|
| 582 |
+
    def _identity_handler(self, trans, node):
        """Identity: remove it so the transpose can keep moving down."""
        if node.output[0] in node.graph.outputs:
            # cannot drop a node that produces a graph output
            return False
        for g in {self._g, node.graph}:
            g.replace_all_inputs(node.output[0], trans.output[0])  # ops=g.get_nodes()
        node.graph.remove_node(node.name)
        return True
|
| 589 |
+
|
| 590 |
+
def _concat_handler(self, trans, node):
|
| 591 |
+
if self._handle_node_having_branches(trans, node):
|
| 592 |
+
perm = trans.get_attr_value("perm")
|
| 593 |
+
axis = node.get_attr_value("axis", 0)
|
| 594 |
+
new_axis = perm[axis]
|
| 595 |
+
node.set_attr("axis", new_axis)
|
| 596 |
+
return True
|
| 597 |
+
return False
|
| 598 |
+
|
| 599 |
+
def _split_handler(self, trans, node):
|
| 600 |
+
# Todo: need handle cases where Slit node has more than 1 outputs.
|
| 601 |
+
if self._handle_node_having_branches(trans, node):
|
| 602 |
+
node.set_attr("axis", 1)
|
| 603 |
+
return True
|
| 604 |
+
return False
|
| 605 |
+
|
| 606 |
+
    def _squeeze_handler(self, trans, node):
        """Squeeze: swap trans and squeeze, remapping both the perm and the squeeze axes."""
        trans_rank = get_transpose_rank(trans)

        def _calculate_new_attr(ori_perm, ori_squeeze_axes):
            # Given the pre-swap perm and squeeze axes, compute the post-swap pair.
            ori_squeeze_axes = [i if i >= 0 else i + trans_rank for i in ori_squeeze_axes]  # normalize negatives
            new_squeeze_axes = sorted([ori_perm[i] for i in ori_squeeze_axes])
            # calculate output shape after trans and squeeze
            n = len(ori_perm)
            input_shape = list(range(n))
            shape_after_trans = [input_shape[i] for i in ori_perm]
            output_shape = [shape_after_trans[i] for i in range(n) if i not in ori_squeeze_axes]
            # calculate new_perm
            # after switch, the output shape should be same, using this condition we can figure the new perm
            shape_after_squeeze = [input_shape[i] for i in range(n) if i not in new_squeeze_axes]
            new_perm = [shape_after_squeeze.index(i) for i in output_shape]

            return new_perm, new_squeeze_axes

        if not self._nodes_has_single_consumer_node([trans]):
            return False

        axes = None
        # in opset 13, axes is an input not attr
        if node.get_attr("axes"):
            axes = node.get_attr("axes").ints
        if len(node.input) > 1 and node.inputs[1].is_const():
            axes = node.inputs[1].get_tensor_value(as_list=True)

        if axes is not None:
            # switch tran and squeeze
            # 1 switch
            self._g.replace_all_inputs(node.output[0], trans.output[0])  # ops=self._g.get_nodes()
            self._g.replace_input(node, node.input[0], trans.input[0], 0)
            self._g.replace_input(trans, trans.input[0], node.output[0], 0)
            # 2 correct attr of nodes
            squeeze_axes = sorted(axes)
            trans_perm = list(trans.get_attr("perm").ints)
            new_perm, new_squeeze_axes = _calculate_new_attr(ori_perm=trans_perm, ori_squeeze_axes=squeeze_axes)
            trans.set_attr("perm", new_perm)
            if self._g.opset <= 12:
                node.set_attr("axes", new_squeeze_axes)
            else:
                # opset >= 13: axes travels as a const tensor input instead of an attr
                new_axes_np = np.array(new_squeeze_axes, dtype=np.int64)
                new_axes_const = self._g.make_const(utils.make_name(node.inputs[1].name), new_axes_np)
                self._g.replace_inputs(node, [node.input[0], new_axes_const.output[0]])
            # 3 set shape
            squeeze_shape = self._g.get_shape(node.output[0])
            self._g.set_shape(trans.output[0], squeeze_shape)
            input_shape = self._g.get_shape(node.input[0])
            if input_shape is not None:
                new_squeeze_output_shape = [input_shape[i] for i in range(trans_rank) if i not in new_squeeze_axes]
            else:
                new_squeeze_output_shape = [-1] * trans_rank
                self.logger.warning("%s's shape is unknown, which may interfere further optimization", node.input[0])
            self._g.set_shape(node.output[0], new_squeeze_output_shape)
            return True
        return False
|
| 662 |
+
|
| 663 |
+
    def _sub_handler(self, trans, node):
        """Sub: handled by the generic multi-input branch logic."""
        return self._handle_node_having_branches(trans, node)
|
| 665 |
+
|
| 666 |
+
def _pad_handler(self, trans, node):
    """Push a Transpose below a Pad by reordering the pad values.

    Pads are stored as [x1_begin, x2_begin, ..., x1_end, x2_end, ...];
    the reordering here moves the channel entries to match an
    NHWC -> NCHW permutation (rank 4) or NDHWC -> NCDHW (rank 5).
    Returns True when the swap was performed, False otherwise.
    """
    trans_rank = get_transpose_rank(trans)
    # [N-start, H-start, W-start, C-start, N-end, H-end, W-end, C-end]
    if self._g.opset < 11:
        # Before opset 11 the pads live in an attribute; rewrite it in place.
        pads = node.get_attr('pads').ints  # [x1_begin, x2_begin...x1_end, x2_end,...]
        # NHWC->NCHW
        if trans_rank == 4:
            new_pads = [pads[0], pads[3], pads[1], pads[2], pads[4], pads[7], pads[5], pads[6]]
        else:
            new_pads = [pads[0], pads[4], pads[1], pads[2], pads[3], pads[5], pads[9], pads[6], pads[7], pads[8]]
        node.set_attr("pads", new_pads)
        return self._switch_transpose_and_node(node, trans)

    # From opset 11 on, pads are the node's second input.
    input1 = node.inputs[1]
    if input1.is_const():
        # NOTE(review): "unkown" is the (misspelled) sentinel value this
        # codebase uses for an undetermined data_format — do not "fix" it.
        if input1.data_format in ["NHWC", "unkown"]:
            if not self._nodes_has_single_consumer_node([input1]):
                # The const is shared with other consumers; mutate a private copy.
                input1 = self._g.copy_const(input1)
                self._g.replace_input(node, node.input[1], input1.output[0], 1)
            pads = input1.get_tensor_value()
            # NHWC->NCHW
            if trans_rank == 4:
                new_pads = np.array([pads[0], pads[3], pads[1], pads[2],
                                     pads[4], pads[7], pads[5], pads[6]], dtype=np.int64)
            else:
                new_pads = np.array([pads[0], pads[4], pads[1], pads[2], pads[3],
                                     pads[5], pads[9], pads[6], pads[7], pads[8]], dtype=np.int64)
            input1.set_tensor_value(new_pads)
            # Mark the const as already reordered so it is not shuffled twice.
            input1.data_format = "NCHW"
        return self._switch_transpose_and_node(node, trans)
    # when the second input is not a constant, let's shuffle it with Split followed by Concat
    # there are examples of models, where this non-constant input
    # gets constant folded anyway by a framework.
    split = self._g.make_node("Split", inputs=[node.input[1]], attr={}, output_count=trans_rank * 2)
    pads = split.output
    if trans_rank == 4:
        new_pads = self._g.make_node("Concat", [pads[0], pads[3], pads[1], pads[2],
                                                pads[4], pads[7], pads[5], pads[6]],
                                     {'axis': 0})
    else:
        new_pads = self._g.make_node("Concat", [pads[0], pads[4], pads[1], pads[2], pads[3],
                                                pads[5], pads[9], pads[6], pads[7], pads[8]],
                                     {'axis': 0})
    self._g.replace_input(node, node.input[1], new_pads.output[0], 1)
    return self._switch_transpose_and_node(node, trans)
|
| 711 |
+
|
| 712 |
+
def _arg_min_max_handler(self, trans, node):
    """ArgMin/ArgMax carry a scalar 'axis' attribute; wrap it in a temporary
    'axes' list so the generic reduce handler can remap it through the
    transpose permutation, then unwrap the result and drop the helper attr."""
    original_axis = node.get_attr_value("axis", 0)
    node.set_attr("axes", [original_axis])
    handled = self._reduce_handler(trans, node)
    remapped_axis = node.get_attr_value("axes")[0]
    node.set_attr("axis", remapped_axis)
    del node.attr["axes"]
    return handled
|
| 720 |
+
|
| 721 |
+
def _reduce_handler(self, trans, node):
    """Push a Transpose below a Reduce* node (axes stored as an attribute).

    The reduce axes are remapped through the transpose permutation; when
    keepdims == 0 the reduce removes dimensions, so the transpose perm is
    also rebuilt over the surviving axes. Returns True on success.
    """
    keepdims = node.get_attr_value("keepdims", 1)
    trans_rank = get_transpose_rank(trans)
    # Missing "axes" means reduce over all dims.
    axes = node.get_attr_value("axes", list(range(trans_rank)))
    perm = trans.get_attr("perm").ints
    # Normalize negative axes before indexing into perm.
    axes = [a + trans_rank if a < 0 else a for a in axes]
    new_axes = [perm[a] for a in axes]
    # Only let the switch update shapes when rank is preserved (keepdims).
    update_shape = keepdims == 1
    shape = self._g.get_shape(node.output[0])
    if not self._switch_transpose_and_node(node, trans, update_shape):
        return False
    node.set_attr("axes", new_axes)
    if keepdims == 0:
        # Map each surviving input axis to its index in the reduced output;
        # reduced axes map to None.
        remaining_axes = []
        j = 0
        for i in range(trans_rank):
            if i in new_axes:
                remaining_axes.append(None)
            else:
                remaining_axes.append(j)
                j += 1
        # Rebuild the (now lower-rank) permutation over surviving axes only.
        new_perm = [remaining_axes[p] for p in perm if remaining_axes[p] is not None]
        if shape:
            new_shape = [shape[new_perm.index(i)] for i in range(len(new_perm))]
            self._g.set_shape(node.output[0], new_shape)
        trans.set_attr("perm", new_perm)
    return True
|
| 748 |
+
|
| 749 |
+
def _reducesum_handler(self, trans, node):
    """Push a Transpose below a ReduceSum.

    Up to opset 12 ReduceSum keeps axes in an attribute and the generic
    reduce handler applies. From opset 13 axes are the second input; only
    the constant-axes, keepdims != 0 case is handled here.
    Returns True when the swap was performed, False otherwise.
    """
    keepdims = node.get_attr("keepdims")
    if self._g.opset <= 12:
        return self._reduce_handler(trans, node)
    if keepdims and keepdims.i == 0:
        # Rank-reducing case is not handled for axes-as-input.
        return False
    if node.inputs[1].is_const():
        axes = node.inputs[1].get_tensor_value()
        perm = trans.get_attr('perm').ints
        # Remap each reduce axis through the transpose permutation.
        axes = [perm[axes[i]] for i in range(len(axes))]
        new_axes = np.array(axes, dtype=np.int64)
        if self._nodes_has_single_consumer_node([node.inputs[1]]):
            node.inputs[1].set_tensor_value(new_axes)
        else:
            # Axes const is shared; make a fresh const instead of mutating it.
            new_axes_const = self._g.make_const(
                utils.make_name(node.inputs[1].name), new_axes
            )
            self._g.replace_input(node, node.input[1], new_axes_const.output[0], 1)
        return self._switch_transpose_and_node(node, trans)
    return False
|
| 769 |
+
|
| 770 |
+
def _slice_handler(self, trans, node):
    """Push a Transpose below a Slice by remapping the slice axes through
    the NCHW_TO_NHWC (rank 4) or NCDHW_TO_NDHWC (rank 5) table.

    Before opset 10 the axes live in an attribute; from opset 10 they are
    the fourth input and only the constant case is handled.
    Returns True when the swap was performed, False otherwise.
    """
    trans_rank = get_transpose_rank(trans)
    axes = None
    if self._g.opset < 10:
        axes_values = node.get_attr("axes")
        if not axes_values:
            # Without explicit axes we cannot remap; leave the graph alone.
            return False
        axes = axes_values.ints
        perm = NCHW_TO_NHWC if trans_rank == 4 else NCDHW_TO_NDHWC
        new_axes = [perm[axes[i]] for i in range(len(axes))]
        node.set_attr("axes", new_axes)
        return self._switch_transpose_and_node(node, trans)
    # in opset 10, axes is input instead of an attribute.
    if len(node.inputs) >= 4 and node.inputs[3].is_const():
        axes = node.inputs[3].get_tensor_value(as_list=False)
        dtype = axes.dtype  # keep the original axes tensor dtype
        axes = axes.tolist()
        perm = NCHW_TO_NHWC if trans_rank == 4 else NCDHW_TO_NDHWC
        axes = [perm[axes[i]] for i in range(len(axes))]
        # axes node might be shared
        new_axes = np.array(axes, dtype=dtype)
        if self._nodes_has_single_consumer_node([node.inputs[3]]):
            node.inputs[3].set_tensor_value(new_axes)
        else:
            # Shared const: create a new one rather than mutating in place.
            new_axes_const = self._g.make_const(
                utils.make_name(node.inputs[3].name), new_axes
            )
            self._g.replace_input(node, node.input[3], new_axes_const.output[0], 3)
        return self._switch_transpose_and_node(node, trans)
    return False
|
| 800 |
+
|
| 801 |
+
def _quantize_handler(self, trans, node):
    """Swap a Transpose with a QuantizeLinear/DequantizeLinear node and,
    when the node is per-axis (has an 'axis' attr), remap that axis
    through the transpose permutation."""
    swapped = self._switch_transpose_and_node(node, trans)
    if not swapped:
        return False
    if 'axis' in node.attr:
        permutation = trans.get_attr_value("perm")
        current_axis = node.get_attr_value("axis")
        node.set_attr("axis", permutation[current_axis])
    return True
|
| 811 |
+
|
| 812 |
+
def _simple_through_handler(self, trans, node):
    """The node needs no attribute fixups, so just swap it with the transpose."""
    swapped = self._switch_transpose_and_node(node, trans)
    return swapped
|
| 814 |
+
|
| 815 |
+
def _shape_handler(self, trans, node):
    """Rewrite `input > Transpose > Shape` into `input > Shape > Gather`.

    The Gather indices are the transpose permutation, so gathering the
    untransposed shape reproduces the transposed shape without moving data.
    Returns True when the rewrite was applied, False otherwise.
    """
    # input > trans > shape can be changed into input > shape > gather
    if not self._nodes_has_single_consumer_node([trans]):
        # Transpose output feeds other nodes too; removing it is unsafe.
        return False

    # Capture output metadata before the nodes are removed.
    output_shape = self._g.get_shape(node.output[0])
    output_dtype = self._g.get_dtype(node.output[0])
    self._g.remove_node(trans.name)
    self._g.remove_node(node.name)
    shape_node = self._g.make_node("Shape", [trans.input[0]])
    # Gather indices = the transpose permutation.
    const_node = self._g.make_const(utils.make_name("Const"), np.array(trans.get_attr("perm").ints))
    # Reuse the old Shape node's outputs so downstream consumers are untouched.
    gather_node = self._g.make_node("Gather", [shape_node.output[0], const_node.output[0]], outputs=node.output)
    self._g.set_shape(gather_node.output[0], output_shape)
    self._g.set_dtype(gather_node.output[0], output_dtype)
    return True
|
lib/python3.10/site-packages/tf2onnx/optimizer/upsample_optimizer.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
"""Resize Optimizer.
|
| 4 |
+
Replace resize operations with all ones in scale with Identity nodes
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from __future__ import unicode_literals
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
from .optimizer_base import GraphOptimizerBase
|
| 12 |
+
|
| 13 |
+
# pylint: disable=logging-not-lazy,unused-argument,missing-docstring,unused-variable,arguments-differ
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class UpsampleOptimizer(GraphOptimizerBase):
    """Replace Upsample nodes whose scales are all 1.0 with Identity nodes."""

    def __init__(self):  # pylint: disable=useless-super-delegation
        super(UpsampleOptimizer, self).__init__()
        self._g = None

    def _optimize(self, graph):
        return self._apply_optimization(
            graph,
            self._optimize_at_current_graph_level)

    def _optimize_at_current_graph_level(self, graph):
        self._g = graph
        for n in self._g.get_nodes():
            if n.type != "Upsample":
                continue
            node_changed = False
            # Up to opset 8 the scales live in the "scales" attribute.
            if self._g.opset <= 8:
                scales = n.get_attr_value("scales")
                if scales and all(float(s) == 1. for s in scales):
                    n.type = "Identity"
                    node_changed = True
            # From opset 9 on the scales are the second input.
            if self._g.opset >= 9 and len(n.input) == 2:
                scales_input = n.inputs[1]
                if scales_input.is_const() and \
                        np.all(scales_input.get_tensor_value(as_list=False) == 1.):
                    n.type = "Identity"
                    # Identity takes a single input; drop the scales input.
                    n.input = [n.input[0]]
                    node_changed = True
            if node_changed:
                self.logger.debug("replacing " + n.name +
                                  " with Identity operation ")

        return self._g
|
lib/python3.10/site-packages/tf2onnx/tflite/BroadcastToOptions.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
# automatically generated by the FlatBuffers compiler, do not modify
|
| 4 |
+
|
| 5 |
+
# namespace: tflite
|
| 6 |
+
|
| 7 |
+
import flatbuffers
|
| 8 |
+
from flatbuffers.compat import import_numpy
|
| 9 |
+
np = import_numpy()
|
| 10 |
+
|
| 11 |
+
class BroadcastToOptions(object):
    """Auto-generated FlatBuffers accessor for the TFLite BroadcastToOptions table (no fields)."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsBroadcastToOptions(cls, buf, offset):
        # Read the root table offset from the buffer and wrap it in an accessor.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = BroadcastToOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def BroadcastToOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"\x54\x46\x4C\x33" is "TFL3", the TFLite file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # BroadcastToOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)
|
| 28 |
+
|
| 29 |
+
# Builder helpers for writing a BroadcastToOptions table (0 fields).
def BroadcastToOptionsStart(builder): builder.StartObject(0)
def BroadcastToOptionsEnd(builder): return builder.EndObject()
|
lib/python3.10/site-packages/tf2onnx/tflite/BuiltinOperator.py
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
# automatically generated by the FlatBuffers compiler, do not modify
|
| 4 |
+
|
| 5 |
+
# namespace: tflite
|
| 6 |
+
|
| 7 |
+
class BuiltinOperator(object):
    """Auto-generated enum of TFLite builtin operator codes (mirrors the
    BuiltinOperator enum in the TFLite FlatBuffers schema)."""
    ADD = 0
    AVERAGE_POOL_2D = 1
    CONCATENATION = 2
    CONV_2D = 3
    DEPTHWISE_CONV_2D = 4
    DEPTH_TO_SPACE = 5
    DEQUANTIZE = 6
    EMBEDDING_LOOKUP = 7
    FLOOR = 8
    FULLY_CONNECTED = 9
    HASHTABLE_LOOKUP = 10
    L2_NORMALIZATION = 11
    L2_POOL_2D = 12
    LOCAL_RESPONSE_NORMALIZATION = 13
    LOGISTIC = 14
    LSH_PROJECTION = 15
    LSTM = 16
    MAX_POOL_2D = 17
    MUL = 18
    RELU = 19
    RELU_N1_TO_1 = 20
    RELU6 = 21
    RESHAPE = 22
    RESIZE_BILINEAR = 23
    RNN = 24
    SOFTMAX = 25
    SPACE_TO_DEPTH = 26
    SVDF = 27
    TANH = 28
    CONCAT_EMBEDDINGS = 29
    SKIP_GRAM = 30
    CALL = 31
    CUSTOM = 32
    EMBEDDING_LOOKUP_SPARSE = 33
    PAD = 34
    UNIDIRECTIONAL_SEQUENCE_RNN = 35
    GATHER = 36
    BATCH_TO_SPACE_ND = 37
    SPACE_TO_BATCH_ND = 38
    TRANSPOSE = 39
    MEAN = 40
    SUB = 41
    DIV = 42
    SQUEEZE = 43
    UNIDIRECTIONAL_SEQUENCE_LSTM = 44
    STRIDED_SLICE = 45
    BIDIRECTIONAL_SEQUENCE_RNN = 46
    EXP = 47
    TOPK_V2 = 48
    SPLIT = 49
    LOG_SOFTMAX = 50
    DELEGATE = 51
    BIDIRECTIONAL_SEQUENCE_LSTM = 52
    CAST = 53
    PRELU = 54
    MAXIMUM = 55
    ARG_MAX = 56
    MINIMUM = 57
    LESS = 58
    NEG = 59
    PADV2 = 60
    GREATER = 61
    GREATER_EQUAL = 62
    LESS_EQUAL = 63
    SELECT = 64
    SLICE = 65
    SIN = 66
    TRANSPOSE_CONV = 67
    SPARSE_TO_DENSE = 68
    TILE = 69
    EXPAND_DIMS = 70
    EQUAL = 71
    NOT_EQUAL = 72
    LOG = 73
    SUM = 74
    SQRT = 75
    RSQRT = 76
    SHAPE = 77
    POW = 78
    ARG_MIN = 79
    FAKE_QUANT = 80
    REDUCE_PROD = 81
    REDUCE_MAX = 82
    PACK = 83
    LOGICAL_OR = 84
    ONE_HOT = 85
    LOGICAL_AND = 86
    LOGICAL_NOT = 87
    UNPACK = 88
    REDUCE_MIN = 89
    FLOOR_DIV = 90
    REDUCE_ANY = 91
    SQUARE = 92
    ZEROS_LIKE = 93
    FILL = 94
    FLOOR_MOD = 95
    RANGE = 96
    RESIZE_NEAREST_NEIGHBOR = 97
    LEAKY_RELU = 98
    SQUARED_DIFFERENCE = 99
    MIRROR_PAD = 100
    ABS = 101
    SPLIT_V = 102
    UNIQUE = 103
    CEIL = 104
    REVERSE_V2 = 105
    ADD_N = 106
    GATHER_ND = 107
    COS = 108
    WHERE = 109
    RANK = 110
    ELU = 111
    REVERSE_SEQUENCE = 112
    MATRIX_DIAG = 113
    QUANTIZE = 114
    MATRIX_SET_DIAG = 115
    ROUND = 116
    HARD_SWISH = 117
    IF = 118
    WHILE = 119
    NON_MAX_SUPPRESSION_V4 = 120
    NON_MAX_SUPPRESSION_V5 = 121
    SCATTER_ND = 122
    SELECT_V2 = 123
    DENSIFY = 124
    SEGMENT_SUM = 125
    BATCH_MATMUL = 126
    PLACEHOLDER_FOR_GREATER_OP_CODES = 127
    CUMSUM = 128
    CALL_ONCE = 129
    BROADCAST_TO = 130
    RFFT2D = 131
|
| 140 |
+
|
lib/python3.10/site-packages/tf2onnx/tflite/BuiltinOptions.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
# automatically generated by the FlatBuffers compiler, do not modify
|
| 4 |
+
|
| 5 |
+
# namespace: tflite
|
| 6 |
+
|
| 7 |
+
class BuiltinOptions(object):
    """Auto-generated discriminator enum for the TFLite builtin_options
    union (one value per *Options table type in the schema)."""
    NONE = 0
    Conv2DOptions = 1
    DepthwiseConv2DOptions = 2
    ConcatEmbeddingsOptions = 3
    LSHProjectionOptions = 4
    Pool2DOptions = 5
    SVDFOptions = 6
    RNNOptions = 7
    FullyConnectedOptions = 8
    SoftmaxOptions = 9
    ConcatenationOptions = 10
    AddOptions = 11
    L2NormOptions = 12
    LocalResponseNormalizationOptions = 13
    LSTMOptions = 14
    ResizeBilinearOptions = 15
    CallOptions = 16
    ReshapeOptions = 17
    SkipGramOptions = 18
    SpaceToDepthOptions = 19
    EmbeddingLookupSparseOptions = 20
    MulOptions = 21
    PadOptions = 22
    GatherOptions = 23
    BatchToSpaceNDOptions = 24
    SpaceToBatchNDOptions = 25
    TransposeOptions = 26
    ReducerOptions = 27
    SubOptions = 28
    DivOptions = 29
    SqueezeOptions = 30
    SequenceRNNOptions = 31
    StridedSliceOptions = 32
    ExpOptions = 33
    TopKV2Options = 34
    SplitOptions = 35
    LogSoftmaxOptions = 36
    CastOptions = 37
    DequantizeOptions = 38
    MaximumMinimumOptions = 39
    ArgMaxOptions = 40
    LessOptions = 41
    NegOptions = 42
    PadV2Options = 43
    GreaterOptions = 44
    GreaterEqualOptions = 45
    LessEqualOptions = 46
    SelectOptions = 47
    SliceOptions = 48
    TransposeConvOptions = 49
    SparseToDenseOptions = 50
    TileOptions = 51
    ExpandDimsOptions = 52
    EqualOptions = 53
    NotEqualOptions = 54
    ShapeOptions = 55
    PowOptions = 56
    ArgMinOptions = 57
    FakeQuantOptions = 58
    PackOptions = 59
    LogicalOrOptions = 60
    OneHotOptions = 61
    LogicalAndOptions = 62
    LogicalNotOptions = 63
    UnpackOptions = 64
    FloorDivOptions = 65
    SquareOptions = 66
    ZerosLikeOptions = 67
    FillOptions = 68
    BidirectionalSequenceLSTMOptions = 69
    BidirectionalSequenceRNNOptions = 70
    UnidirectionalSequenceLSTMOptions = 71
    FloorModOptions = 72
    RangeOptions = 73
    ResizeNearestNeighborOptions = 74
    LeakyReluOptions = 75
    SquaredDifferenceOptions = 76
    MirrorPadOptions = 77
    AbsOptions = 78
    SplitVOptions = 79
    UniqueOptions = 80
    ReverseV2Options = 81
    AddNOptions = 82
    GatherNdOptions = 83
    CosOptions = 84
    WhereOptions = 85
    RankOptions = 86
    ReverseSequenceOptions = 87
    MatrixDiagOptions = 88
    QuantizeOptions = 89
    MatrixSetDiagOptions = 90
    HardSwishOptions = 91
    IfOptions = 92
    WhileOptions = 93
    DepthToSpaceOptions = 94
    NonMaxSuppressionV4Options = 95
    NonMaxSuppressionV5Options = 96
    ScatterNdOptions = 97
    SelectV2Options = 98
    DensifyOptions = 99
    SegmentSumOptions = 100
    BatchMatMulOptions = 101
    CumsumOptions = 102
    CallOnceOptions = 103
    BroadcastToOptions = 104
    Rfft2dOptions = 105
|
| 114 |
+
|
lib/python3.10/site-packages/tf2onnx/tflite/CallOptions.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
# automatically generated by the FlatBuffers compiler, do not modify
|
| 4 |
+
|
| 5 |
+
# namespace: tflite
|
| 6 |
+
|
| 7 |
+
import flatbuffers
|
| 8 |
+
from flatbuffers.compat import import_numpy
|
| 9 |
+
np = import_numpy()
|
| 10 |
+
|
| 11 |
+
class CallOptions(object):
    """Auto-generated FlatBuffers accessor for the TFLite CallOptions table."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsCallOptions(cls, buf, offset):
        # Read the root table offset from the buffer and wrap it in an accessor.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = CallOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def CallOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"\x54\x46\x4C\x33" is "TFL3", the TFLite file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # CallOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # CallOptions
    def Subgraph(self):
        # Scalar field at vtable slot 4; default 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
        return 0
|
| 35 |
+
|
| 36 |
+
# Builder helpers for writing a CallOptions table (1 field: subgraph).
def CallOptionsStart(builder): builder.StartObject(1)
def CallOptionsAddSubgraph(builder, subgraph): builder.PrependUint32Slot(0, subgraph, 0)
def CallOptionsEnd(builder): return builder.EndObject()
|
lib/python3.10/site-packages/tf2onnx/tflite/ConcatEmbeddingsOptions.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
# automatically generated by the FlatBuffers compiler, do not modify
|
| 4 |
+
|
| 5 |
+
# namespace: tflite
|
| 6 |
+
|
| 7 |
+
import flatbuffers
|
| 8 |
+
from flatbuffers.compat import import_numpy
|
| 9 |
+
np = import_numpy()
|
| 10 |
+
|
| 11 |
+
class ConcatEmbeddingsOptions(object):
    """Auto-generated FlatBuffers accessor for the TFLite
    ConcatEmbeddingsOptions table (num_channels plus two int32 vectors)."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsConcatEmbeddingsOptions(cls, buf, offset):
        # Read the root table offset from the buffer and wrap it in an accessor.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = ConcatEmbeddingsOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def ConcatEmbeddingsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"\x54\x46\x4C\x33" is "TFL3", the TFLite file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # ConcatEmbeddingsOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # ConcatEmbeddingsOptions
    def NumChannels(self):
        # Scalar field at vtable slot 4; default 0 when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # ConcatEmbeddingsOptions
    def NumColumnsPerChannel(self, j):
        # j-th element of the int32 vector at vtable slot 6.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # ConcatEmbeddingsOptions
    def NumColumnsPerChannelAsNumpy(self):
        # Whole vector as a numpy int32 array; 0 when the field is absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # ConcatEmbeddingsOptions
    def NumColumnsPerChannelLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # ConcatEmbeddingsOptions
    def NumColumnsPerChannelIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        return o == 0

    # ConcatEmbeddingsOptions
    def EmbeddingDimPerChannel(self, j):
        # j-th element of the int32 vector at vtable slot 8.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int32Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4))
        return 0

    # ConcatEmbeddingsOptions
    def EmbeddingDimPerChannelAsNumpy(self):
        # Whole vector as a numpy int32 array; 0 when the field is absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o)
        return 0

    # ConcatEmbeddingsOptions
    def EmbeddingDimPerChannelLength(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # ConcatEmbeddingsOptions
    def EmbeddingDimPerChannelIsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        return o == 0
|
| 89 |
+
|
| 90 |
+
# Builder helpers for writing a ConcatEmbeddingsOptions table (3 fields).
def ConcatEmbeddingsOptionsStart(builder): builder.StartObject(3)
def ConcatEmbeddingsOptionsAddNumChannels(builder, numChannels): builder.PrependInt32Slot(0, numChannels, 0)
def ConcatEmbeddingsOptionsStartNumColumnsPerChannelVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def ConcatEmbeddingsOptionsAddEmbeddingDimPerChannel(builder, embeddingDimPerChannel): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(embeddingDimPerChannel), 0)
def ConcatEmbeddingsOptionsStartEmbeddingDimPerChannelVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def ConcatEmbeddingsOptionsEnd(builder): return builder.EndObject()
|