Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- lib/python3.10/site-packages/numba/typed/__init__.py +20 -0
- lib/python3.10/site-packages/numba/typed/py.typed +0 -0
- lib/python3.10/site-packages/numba/typed/typedlist.py +688 -0
- lib/python3.10/site-packages/tf2onnx/__init__.py +16 -0
- lib/python3.10/site-packages/tf2onnx/constants.py +49 -0
- lib/python3.10/site-packages/tf2onnx/convert.py +467 -0
- lib/python3.10/site-packages/tf2onnx/flexbuffers.py +146 -0
- lib/python3.10/site-packages/tf2onnx/graph.py +1730 -0
- lib/python3.10/site-packages/tf2onnx/graph_builder.py +228 -0
- lib/python3.10/site-packages/tf2onnx/graph_matcher.py +277 -0
- lib/python3.10/site-packages/tf2onnx/handler.py +152 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/__init__.py +50 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/bigru_rewriter.py +106 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/bilstm_rewriter.py +102 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/cond_rewriter.py +320 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/conv2d_with_add_rewriter.py +41 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/conv2d_with_pad_rewriter.py +65 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/custom_rnn_rewriter.py +228 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/dropout_rewriter.py +103 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/eye_rewriter.py +172 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/flatten_rewriter.py +101 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/gemm_rewriter.py +135 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/gru_rewriter.py +260 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/layer_normalization_rewriter.py +123 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/leakyrelu_rewriter.py +57 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/loop_rewriter.py +171 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/loop_rewriter_base.py +451 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/lstm_rewriter.py +433 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/lstm_rewriter_base.py +190 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/quantization_ops_rewriter.py +125 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/random_normal_rewriter.py +60 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/random_uniform.py +107 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/rnn.py +50 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/rnn_utils.py +585 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/thresholded_relu_rewriter.py +49 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/transpose_rewriter.py +34 -0
- lib/python3.10/site-packages/tf2onnx/rewriter/unit_rnn_rewriter_base.py +306 -0
- lib/python3.10/site-packages/tf2onnx/schemas.py +191 -0
- lib/python3.10/site-packages/tf2onnx/shape_inference.py +576 -0
- lib/python3.10/site-packages/tf2onnx/tf_loader.py +639 -0
- lib/python3.10/site-packages/tf2onnx/tf_utils.py +460 -0
- lib/python3.10/site-packages/tf2onnx/tflite/AbsOptions.py +30 -0
- lib/python3.10/site-packages/tf2onnx/tflite/ActivationFunctionType.py +14 -0
- lib/python3.10/site-packages/tf2onnx/tflite/ArgMinOptions.py +38 -0
- lib/python3.10/site-packages/tf2onnx/tflite/CosOptions.py +30 -0
- lib/python3.10/site-packages/tf2onnx/tflite/DepthToSpaceOptions.py +38 -0
- lib/python3.10/site-packages/tf2onnx/tflite/DepthwiseConv2DOptions.py +86 -0
- lib/python3.10/site-packages/tf2onnx/tflite/FillOptions.py +30 -0
- lib/python3.10/site-packages/tf2onnx/tflite/GatherOptions.py +38 -0
- lib/python3.10/site-packages/tf2onnx/tflite/GreaterOptions.py +30 -0
lib/python3.10/site-packages/numba/typed/__init__.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import importlib
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
_delayed_symbols = {
|
| 5 |
+
"Dict": ".typeddict",
|
| 6 |
+
"List": ".typedlist",
|
| 7 |
+
}
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def __getattr__(name):
|
| 11 |
+
# Uses PEP-562 but requires python>3.6
|
| 12 |
+
if name in _delayed_symbols:
|
| 13 |
+
modpath = _delayed_symbols[name]
|
| 14 |
+
mod = importlib.import_module(modpath, __name__)
|
| 15 |
+
return getattr(mod, name)
|
| 16 |
+
else:
|
| 17 |
+
try:
|
| 18 |
+
return importlib.import_module(f".{name}", __name__)
|
| 19 |
+
except ModuleNotFoundError:
|
| 20 |
+
raise AttributeError
|
lib/python3.10/site-packages/numba/typed/py.typed
ADDED
|
File without changes
|
lib/python3.10/site-packages/numba/typed/typedlist.py
ADDED
|
@@ -0,0 +1,688 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Python wrapper that connects CPython interpreter to the Numba typed-list.
|
| 3 |
+
|
| 4 |
+
This is the code that is used when creating typed lists outside of a `@jit`
|
| 5 |
+
context and when returning a typed-list from a `@jit` decorated function. It
|
| 6 |
+
is basically a Python class that has a Numba-allocated typed-list under the hood
|
| 7 |
+
and uses `@jit` functions to access it. Since it inherits from MutableSequence
|
| 8 |
+
it should really quack like the CPython `list`.
|
| 9 |
+
|
| 10 |
+
"""
|
| 11 |
+
from collections.abc import MutableSequence
|
| 12 |
+
|
| 13 |
+
from numba.core.types import ListType
|
| 14 |
+
from numba.core.imputils import numba_typeref_ctor
|
| 15 |
+
from numba.core.dispatcher import Dispatcher
|
| 16 |
+
from numba.core import types, config, cgutils
|
| 17 |
+
from numba import njit, typeof
|
| 18 |
+
from numba.core.extending import (
|
| 19 |
+
overload,
|
| 20 |
+
box,
|
| 21 |
+
unbox,
|
| 22 |
+
NativeValue,
|
| 23 |
+
type_callable,
|
| 24 |
+
overload_classmethod,
|
| 25 |
+
)
|
| 26 |
+
from numba.typed import listobject
|
| 27 |
+
from numba.core.errors import TypingError, LoweringError
|
| 28 |
+
from numba.core.typing.templates import Signature
|
| 29 |
+
import typing as pt
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
Int_or_Slice = pt.Union["pt.SupportsIndex", slice]
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
T_co = pt.TypeVar('T_co', covariant=True)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class _Sequence(pt.Protocol[T_co]):
|
| 39 |
+
def __getitem__(self, i: int) -> T_co:
|
| 40 |
+
...
|
| 41 |
+
|
| 42 |
+
def __len__(self) -> int:
|
| 43 |
+
...
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
DEFAULT_ALLOCATED = listobject.DEFAULT_ALLOCATED
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
# Thin ``@njit`` shims.  Each compiled wrapper forwards exactly one operation
# to the native typed-list, so the pure-Python ``List`` class below can reach
# into nopython code without compiling anything itself.

@njit
def _make_list(itemty, allocated=DEFAULT_ALLOCATED):
    # Allocate a native list and hand back its MemInfo, which the Python-side
    # wrapper keeps to manage the allocation's lifetime.
    return listobject._as_meminfo(listobject.new_list(itemty,
                                                      allocated=allocated))


@njit
def _length(lst):
    return len(lst)


@njit
def _allocated(lst):
    return lst._allocated()


@njit
def _is_mutable(lst):
    return lst._is_mutable()


@njit
def _make_mutable(lst):
    return lst._make_mutable()


@njit
def _make_immutable(lst):
    return lst._make_immutable()


@njit
def _append(lst, item):
    lst.append(item)


@njit
def _setitem(lst, i, item):
    lst[i] = item


@njit
def _getitem(lst, i):
    return lst[i]


@njit
def _contains(lst, item):
    return item in lst


@njit
def _count(lst, item):
    return lst.count(item)


@njit
def _pop(lst, i):
    return lst.pop(i)


@njit
def _delitem(lst, i):
    del lst[i]


@njit
def _extend(lst, iterable):
    return lst.extend(iterable)


@njit
def _insert(lst, i, item):
    lst.insert(i, item)


@njit
def _remove(lst, item):
    lst.remove(item)


@njit
def _clear(lst):
    lst.clear()


@njit
def _reverse(lst):
    lst.reverse()


@njit
def _copy(lst):
    return lst.copy()


@njit
def _eq(this, other):
    return this == other


@njit
def _ne(this, other):
    return this != other


@njit
def _lt(this, other):
    return this < other


@njit
def _le(this, other):
    return this <= other


@njit
def _gt(this, other):
    return this > other


@njit
def _ge(this, other):
    return this >= other


@njit
def _index(lst, item, start, end):
    return lst.index(item, start, end)


@njit
def _sort(lst, key, reverse):
    return lst.sort(key, reverse)
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def _from_meminfo_ptr(ptr, listtype):
|
| 186 |
+
return List(meminfo=ptr, lsttype=listtype)
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
T = pt.TypeVar('T')
|
| 190 |
+
T_or_ListT = pt.Union[T, 'List[T]']
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
class List(MutableSequence, pt.Generic[T]):
|
| 194 |
+
"""A typed-list usable in Numba compiled functions.
|
| 195 |
+
|
| 196 |
+
Implements the MutableSequence interface.
|
| 197 |
+
"""
|
| 198 |
+
|
| 199 |
+
_legal_kwargs = ["lsttype", "meminfo", "allocated"]
|
| 200 |
+
|
| 201 |
+
def __new__(cls,
|
| 202 |
+
*args,
|
| 203 |
+
lsttype=None,
|
| 204 |
+
meminfo=None,
|
| 205 |
+
allocated=DEFAULT_ALLOCATED,
|
| 206 |
+
**kwargs):
|
| 207 |
+
if config.DISABLE_JIT:
|
| 208 |
+
return list(*args, **kwargs)
|
| 209 |
+
else:
|
| 210 |
+
return object.__new__(cls)
|
| 211 |
+
|
| 212 |
+
@classmethod
|
| 213 |
+
def empty_list(cls, item_type, allocated=DEFAULT_ALLOCATED):
|
| 214 |
+
"""Create a new empty List.
|
| 215 |
+
|
| 216 |
+
Parameters
|
| 217 |
+
----------
|
| 218 |
+
item_type: Numba type
|
| 219 |
+
type of the list item.
|
| 220 |
+
allocated: int
|
| 221 |
+
number of items to pre-allocate
|
| 222 |
+
"""
|
| 223 |
+
if config.DISABLE_JIT:
|
| 224 |
+
return list()
|
| 225 |
+
else:
|
| 226 |
+
return cls(lsttype=ListType(item_type), allocated=allocated)
|
| 227 |
+
|
| 228 |
+
def __init__(self, *args, **kwargs):
|
| 229 |
+
"""
|
| 230 |
+
For users, the constructor does not take any parameters.
|
| 231 |
+
The keyword arguments are for internal use only.
|
| 232 |
+
|
| 233 |
+
Parameters
|
| 234 |
+
----------
|
| 235 |
+
args: iterable
|
| 236 |
+
The iterable to initialize the list from
|
| 237 |
+
lsttype : numba.core.types.ListType; keyword-only
|
| 238 |
+
Used internally for the list type.
|
| 239 |
+
meminfo : MemInfo; keyword-only
|
| 240 |
+
Used internally to pass the MemInfo object when boxing.
|
| 241 |
+
allocated: int; keyword-only
|
| 242 |
+
Used internally to pre-allocate space for items
|
| 243 |
+
"""
|
| 244 |
+
illegal_kwargs = any((kw not in self._legal_kwargs for kw in kwargs))
|
| 245 |
+
if illegal_kwargs or args and kwargs:
|
| 246 |
+
raise TypeError("List() takes no keyword arguments")
|
| 247 |
+
if kwargs:
|
| 248 |
+
self._list_type, self._opaque = self._parse_arg(**kwargs)
|
| 249 |
+
else:
|
| 250 |
+
self._list_type = None
|
| 251 |
+
if args:
|
| 252 |
+
if not 0 <= len(args) <= 1:
|
| 253 |
+
raise TypeError(
|
| 254 |
+
"List() expected at most 1 argument, got {}"
|
| 255 |
+
.format(len(args))
|
| 256 |
+
)
|
| 257 |
+
iterable = args[0]
|
| 258 |
+
# Special case Numpy scalars or anything that quacks like a
|
| 259 |
+
# NumPy Array.
|
| 260 |
+
if hasattr(iterable, "ndim") and iterable.ndim == 0:
|
| 261 |
+
self.append(iterable.item())
|
| 262 |
+
else:
|
| 263 |
+
try:
|
| 264 |
+
iter(iterable)
|
| 265 |
+
except TypeError:
|
| 266 |
+
raise TypeError("List() argument must be iterable")
|
| 267 |
+
for i in args[0]:
|
| 268 |
+
self.append(i)
|
| 269 |
+
|
| 270 |
+
def _parse_arg(self, lsttype, meminfo=None, allocated=DEFAULT_ALLOCATED):
|
| 271 |
+
if not isinstance(lsttype, ListType):
|
| 272 |
+
raise TypeError('*lsttype* must be a ListType')
|
| 273 |
+
|
| 274 |
+
if meminfo is not None:
|
| 275 |
+
opaque = meminfo
|
| 276 |
+
else:
|
| 277 |
+
opaque = _make_list(lsttype.item_type, allocated=allocated)
|
| 278 |
+
return lsttype, opaque
|
| 279 |
+
|
| 280 |
+
@property
|
| 281 |
+
def _numba_type_(self):
|
| 282 |
+
if self._list_type is None:
|
| 283 |
+
raise TypeError("invalid operation on untyped list")
|
| 284 |
+
return self._list_type
|
| 285 |
+
|
| 286 |
+
@property
|
| 287 |
+
def _typed(self):
|
| 288 |
+
"""Returns True if the list is typed.
|
| 289 |
+
"""
|
| 290 |
+
return self._list_type is not None
|
| 291 |
+
|
| 292 |
+
@property
|
| 293 |
+
def _dtype(self):
|
| 294 |
+
if not self._typed:
|
| 295 |
+
raise RuntimeError("invalid operation on untyped list")
|
| 296 |
+
return self._list_type.dtype
|
| 297 |
+
|
| 298 |
+
def _initialise_list(self, item):
|
| 299 |
+
lsttype = types.ListType(typeof(item))
|
| 300 |
+
self._list_type, self._opaque = self._parse_arg(lsttype)
|
| 301 |
+
|
| 302 |
+
def __len__(self) -> int:
|
| 303 |
+
if not self._typed:
|
| 304 |
+
return 0
|
| 305 |
+
else:
|
| 306 |
+
return _length(self)
|
| 307 |
+
|
| 308 |
+
def _allocated(self):
|
| 309 |
+
if not self._typed:
|
| 310 |
+
return DEFAULT_ALLOCATED
|
| 311 |
+
else:
|
| 312 |
+
return _allocated(self)
|
| 313 |
+
|
| 314 |
+
def _is_mutable(self):
|
| 315 |
+
return _is_mutable(self)
|
| 316 |
+
|
| 317 |
+
def _make_mutable(self):
|
| 318 |
+
return _make_mutable(self)
|
| 319 |
+
|
| 320 |
+
def _make_immutable(self):
|
| 321 |
+
return _make_immutable(self)
|
| 322 |
+
|
| 323 |
+
def __eq__(self, other):
|
| 324 |
+
return _eq(self, other)
|
| 325 |
+
|
| 326 |
+
def __ne__(self, other):
|
| 327 |
+
return _ne(self, other)
|
| 328 |
+
|
| 329 |
+
def __lt__(self, other):
|
| 330 |
+
return _lt(self, other)
|
| 331 |
+
|
| 332 |
+
def __le__(self, other):
|
| 333 |
+
return _le(self, other)
|
| 334 |
+
|
| 335 |
+
def __gt__(self, other):
|
| 336 |
+
return _gt(self, other)
|
| 337 |
+
|
| 338 |
+
def __ge__(self, other):
|
| 339 |
+
return _ge(self, other)
|
| 340 |
+
|
| 341 |
+
def append(self, item: T) -> None:
|
| 342 |
+
if not self._typed:
|
| 343 |
+
self._initialise_list(item)
|
| 344 |
+
_append(self, item)
|
| 345 |
+
|
| 346 |
+
# noqa F811 comments required due to github.com/PyCQA/pyflakes/issues/592
|
| 347 |
+
# noqa E704 required to follow overload style of using ... in the same line
|
| 348 |
+
@pt.overload # type: ignore[override]
|
| 349 |
+
def __setitem__(self, i: int, o: T) -> None: ... # noqa: F811, E704
|
| 350 |
+
@pt.overload
|
| 351 |
+
def __setitem__(self, s: slice, o: 'List[T]') -> None: ... # noqa: F811, E704, E501
|
| 352 |
+
|
| 353 |
+
def __setitem__(self, i: Int_or_Slice, item: T_or_ListT) -> None: # noqa: F811, E501
|
| 354 |
+
if not self._typed:
|
| 355 |
+
self._initialise_list(item)
|
| 356 |
+
_setitem(self, i, item)
|
| 357 |
+
|
| 358 |
+
# noqa F811 comments required due to github.com/PyCQA/pyflakes/issues/592
|
| 359 |
+
# noqa E704 required to follow overload style of using ... in the same line
|
| 360 |
+
@pt.overload
|
| 361 |
+
def __getitem__(self, i: int) -> T: ... # noqa: F811, E704
|
| 362 |
+
@pt.overload
|
| 363 |
+
def __getitem__(self, i: slice) -> 'List[T]': ... # noqa: F811, E704
|
| 364 |
+
|
| 365 |
+
def __getitem__(self, i: Int_or_Slice) -> T_or_ListT: # noqa: F811
|
| 366 |
+
if not self._typed:
|
| 367 |
+
raise IndexError
|
| 368 |
+
else:
|
| 369 |
+
return _getitem(self, i)
|
| 370 |
+
|
| 371 |
+
def __iter__(self) -> pt.Iterator[T]:
|
| 372 |
+
for i in range(len(self)):
|
| 373 |
+
yield self[i]
|
| 374 |
+
|
| 375 |
+
def __contains__(self, item: T) -> bool: # type: ignore[override]
|
| 376 |
+
return _contains(self, item)
|
| 377 |
+
|
| 378 |
+
def __delitem__(self, i: Int_or_Slice) -> None:
|
| 379 |
+
_delitem(self, i)
|
| 380 |
+
|
| 381 |
+
def insert(self, i: int, item: T) -> None:
|
| 382 |
+
if not self._typed:
|
| 383 |
+
self._initialise_list(item)
|
| 384 |
+
_insert(self, i, item)
|
| 385 |
+
|
| 386 |
+
def count(self, item: T) -> int:
|
| 387 |
+
return _count(self, item)
|
| 388 |
+
|
| 389 |
+
def pop(self, i: "pt.SupportsIndex" = -1) -> T:
|
| 390 |
+
return _pop(self, i)
|
| 391 |
+
|
| 392 |
+
def extend(self, iterable: "_Sequence[T]") -> None: #type: ignore[override]
|
| 393 |
+
# Empty iterable, do nothing
|
| 394 |
+
if len(iterable) == 0:
|
| 395 |
+
return None
|
| 396 |
+
if not self._typed:
|
| 397 |
+
# Need to get the first element of the iterable to initialise the
|
| 398 |
+
# type of the list. FIXME: this may be a problem if the iterable
|
| 399 |
+
# can not be sliced.
|
| 400 |
+
self._initialise_list(iterable[0])
|
| 401 |
+
return _extend(self, iterable)
|
| 402 |
+
|
| 403 |
+
def remove(self, item: T) -> None:
|
| 404 |
+
return _remove(self, item)
|
| 405 |
+
|
| 406 |
+
def clear(self):
|
| 407 |
+
return _clear(self)
|
| 408 |
+
|
| 409 |
+
def reverse(self):
|
| 410 |
+
return _reverse(self)
|
| 411 |
+
|
| 412 |
+
def copy(self):
|
| 413 |
+
return _copy(self)
|
| 414 |
+
|
| 415 |
+
def index(self, item: T, start: pt.Optional[int] = None,
|
| 416 |
+
stop: pt.Optional[int] = None) -> int:
|
| 417 |
+
return _index(self, item, start, stop)
|
| 418 |
+
|
| 419 |
+
def sort(self, key=None, reverse=False):
|
| 420 |
+
"""Sort the list inplace.
|
| 421 |
+
|
| 422 |
+
See also ``list.sort()``
|
| 423 |
+
"""
|
| 424 |
+
# If key is not already a dispatcher object, make it so
|
| 425 |
+
if callable(key) and not isinstance(key, Dispatcher):
|
| 426 |
+
key = njit(key)
|
| 427 |
+
return _sort(self, key, reverse)
|
| 428 |
+
|
| 429 |
+
def __str__(self):
|
| 430 |
+
buf = []
|
| 431 |
+
for x in self:
|
| 432 |
+
buf.append("{}".format(x))
|
| 433 |
+
# Check whether the code was invoked from IPython shell
|
| 434 |
+
try:
|
| 435 |
+
get_ipython
|
| 436 |
+
preview = ', '.join(buf[:1000])
|
| 437 |
+
suffix = ', ...' if len(buf) > 1000 else ''
|
| 438 |
+
return '[{0}{1}]'.format(preview, suffix)
|
| 439 |
+
except (NameError, IndexError):
|
| 440 |
+
return '[{0}]'.format(', '.join(buf))
|
| 441 |
+
|
| 442 |
+
def __repr__(self):
|
| 443 |
+
body = str(self)
|
| 444 |
+
prefix = str(self._list_type) if self._typed else "ListType[Undefined]"
|
| 445 |
+
return "{prefix}({body})".format(prefix=prefix, body=body)
|
| 446 |
+
|
| 447 |
+
|
| 448 |
+
@overload_classmethod(ListType, 'empty_list')
|
| 449 |
+
def typedlist_empty(cls, item_type, allocated=DEFAULT_ALLOCATED):
|
| 450 |
+
if cls.instance_type is not ListType:
|
| 451 |
+
return
|
| 452 |
+
|
| 453 |
+
def impl(cls, item_type, allocated=DEFAULT_ALLOCATED):
|
| 454 |
+
return listobject.new_list(item_type, allocated=allocated)
|
| 455 |
+
|
| 456 |
+
return impl
|
| 457 |
+
|
| 458 |
+
|
| 459 |
+
@box(types.ListType)
|
| 460 |
+
def box_lsttype(typ, val, c):
|
| 461 |
+
context = c.context
|
| 462 |
+
builder = c.builder
|
| 463 |
+
|
| 464 |
+
# XXX deduplicate
|
| 465 |
+
ctor = cgutils.create_struct_proxy(typ)
|
| 466 |
+
lstruct = ctor(context, builder, value=val)
|
| 467 |
+
# Returns the plain MemInfo
|
| 468 |
+
boxed_meminfo = c.box(
|
| 469 |
+
types.MemInfoPointer(types.voidptr),
|
| 470 |
+
lstruct.meminfo,
|
| 471 |
+
)
|
| 472 |
+
|
| 473 |
+
modname = c.context.insert_const_string(
|
| 474 |
+
c.builder.module, 'numba.typed.typedlist',
|
| 475 |
+
)
|
| 476 |
+
typedlist_mod = c.pyapi.import_module(modname)
|
| 477 |
+
fmp_fn = c.pyapi.object_getattr_string(typedlist_mod, '_from_meminfo_ptr')
|
| 478 |
+
|
| 479 |
+
lsttype_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ))
|
| 480 |
+
|
| 481 |
+
result_var = builder.alloca(c.pyapi.pyobj)
|
| 482 |
+
builder.store(cgutils.get_null_value(c.pyapi.pyobj), result_var)
|
| 483 |
+
|
| 484 |
+
with builder.if_then(cgutils.is_not_null(builder, lsttype_obj)):
|
| 485 |
+
res = c.pyapi.call_function_objargs(
|
| 486 |
+
fmp_fn, (boxed_meminfo, lsttype_obj),
|
| 487 |
+
)
|
| 488 |
+
c.pyapi.decref(fmp_fn)
|
| 489 |
+
c.pyapi.decref(typedlist_mod)
|
| 490 |
+
c.pyapi.decref(boxed_meminfo)
|
| 491 |
+
builder.store(res, result_var)
|
| 492 |
+
return builder.load(result_var)
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
@unbox(types.ListType)
|
| 496 |
+
def unbox_listtype(typ, val, c):
|
| 497 |
+
context = c.context
|
| 498 |
+
builder = c.builder
|
| 499 |
+
|
| 500 |
+
# Check that `type(val) is Dict`
|
| 501 |
+
list_type = c.pyapi.unserialize(c.pyapi.serialize_object(List))
|
| 502 |
+
valtype = c.pyapi.object_type(val)
|
| 503 |
+
same_type = builder.icmp_unsigned("==", valtype, list_type)
|
| 504 |
+
|
| 505 |
+
with c.builder.if_else(same_type) as (then, orelse):
|
| 506 |
+
with then:
|
| 507 |
+
miptr = c.pyapi.object_getattr_string(val, '_opaque')
|
| 508 |
+
|
| 509 |
+
native = c.unbox(types.MemInfoPointer(types.voidptr), miptr)
|
| 510 |
+
|
| 511 |
+
mi = native.value
|
| 512 |
+
ctor = cgutils.create_struct_proxy(typ)
|
| 513 |
+
lstruct = ctor(context, builder)
|
| 514 |
+
|
| 515 |
+
data_pointer = context.nrt.meminfo_data(builder, mi)
|
| 516 |
+
data_pointer = builder.bitcast(
|
| 517 |
+
data_pointer,
|
| 518 |
+
listobject.ll_list_type.as_pointer(),
|
| 519 |
+
)
|
| 520 |
+
|
| 521 |
+
lstruct.data = builder.load(data_pointer)
|
| 522 |
+
lstruct.meminfo = mi
|
| 523 |
+
|
| 524 |
+
lstobj = lstruct._getvalue()
|
| 525 |
+
c.pyapi.decref(miptr)
|
| 526 |
+
bb_unboxed = c.builder.basic_block
|
| 527 |
+
|
| 528 |
+
with orelse:
|
| 529 |
+
# Raise error on incorrect type
|
| 530 |
+
c.pyapi.err_format(
|
| 531 |
+
"PyExc_TypeError",
|
| 532 |
+
"can't unbox a %S as a %S",
|
| 533 |
+
valtype, list_type,
|
| 534 |
+
)
|
| 535 |
+
bb_else = c.builder.basic_block
|
| 536 |
+
|
| 537 |
+
# Phi nodes to gather the output
|
| 538 |
+
lstobj_res = c.builder.phi(lstobj.type)
|
| 539 |
+
is_error_res = c.builder.phi(cgutils.bool_t)
|
| 540 |
+
|
| 541 |
+
lstobj_res.add_incoming(lstobj, bb_unboxed)
|
| 542 |
+
lstobj_res.add_incoming(lstobj.type(None), bb_else)
|
| 543 |
+
|
| 544 |
+
is_error_res.add_incoming(cgutils.false_bit, bb_unboxed)
|
| 545 |
+
is_error_res.add_incoming(cgutils.true_bit, bb_else)
|
| 546 |
+
|
| 547 |
+
# cleanup
|
| 548 |
+
c.pyapi.decref(list_type)
|
| 549 |
+
c.pyapi.decref(valtype)
|
| 550 |
+
|
| 551 |
+
return NativeValue(lstobj_res, is_error=is_error_res)
|
| 552 |
+
|
| 553 |
+
|
| 554 |
+
#
|
| 555 |
+
# The following contains the logic for the type-inferred constructor
|
| 556 |
+
#
|
| 557 |
+
|
| 558 |
+
def _guess_dtype(iterable):
    """Guess the correct dtype of the iterable type."""
    if not isinstance(iterable, types.IterableType):
        raise TypingError(
            "List() argument must be iterable")
    # Nested NumPy arrays: iterating strips one dimension, layout is lost.
    if isinstance(iterable, types.Array) and iterable.ndim > 1:
        return iterable.copy(ndim=iterable.ndim - 1, layout='A')
    if hasattr(iterable, "dtype"):
        return iterable.dtype
    if hasattr(iterable, "yield_type"):
        return iterable.yield_type
    # Iterating a unicode string yields unicode strings.
    if isinstance(iterable, types.UnicodeType):
        return iterable
    # Iterating a dict yields its keys.
    if isinstance(iterable, types.DictType):
        return iterable.key_type
    # This should never happen, since the 'dtype' of any iterable
    # should have been determined above.
    raise TypingError(
        "List() argument does not have a suitable dtype")
|
| 579 |
+
|
| 580 |
+
|
| 581 |
+
@type_callable(ListType)
|
| 582 |
+
def typedlist_call(context):
|
| 583 |
+
"""Defines typing logic for ``List()`` and ``List(iterable)``.
|
| 584 |
+
|
| 585 |
+
If no argument is given, the returned typer types a new typed-list with an
|
| 586 |
+
undefined item type. If a single argument is given it must be iterable with
|
| 587 |
+
a guessable 'dtype'. In this case, the typer types a new typed-list with
|
| 588 |
+
the type set to the 'dtype' of the iterable arg.
|
| 589 |
+
|
| 590 |
+
Parameters
|
| 591 |
+
----------
|
| 592 |
+
arg : single iterable (optional)
|
| 593 |
+
The single optional argument.
|
| 594 |
+
|
| 595 |
+
Returns
|
| 596 |
+
-------
|
| 597 |
+
typer : function
|
| 598 |
+
A typer suitable to type constructor calls.
|
| 599 |
+
|
| 600 |
+
Raises
|
| 601 |
+
------
|
| 602 |
+
The returned typer raises a TypingError in case of unsuitable arguments.
|
| 603 |
+
|
| 604 |
+
"""
|
| 605 |
+
|
| 606 |
+
class Typer(object):
|
| 607 |
+
|
| 608 |
+
def attach_sig(self):
|
| 609 |
+
from inspect import signature as mypysig
|
| 610 |
+
|
| 611 |
+
def mytyper(iterable):
|
| 612 |
+
pass
|
| 613 |
+
self.pysig = mypysig(mytyper)
|
| 614 |
+
|
| 615 |
+
def __call__(self, *args, **kwargs):
|
| 616 |
+
if kwargs:
|
| 617 |
+
raise TypingError(
|
| 618 |
+
"List() takes no keyword arguments"
|
| 619 |
+
)
|
| 620 |
+
elif args:
|
| 621 |
+
if not 0 <= len(args) <= 1:
|
| 622 |
+
raise TypingError(
|
| 623 |
+
"List() expected at most 1 argument, got {}"
|
| 624 |
+
.format(len(args))
|
| 625 |
+
)
|
| 626 |
+
rt = types.ListType(_guess_dtype(args[0]))
|
| 627 |
+
self.attach_sig()
|
| 628 |
+
return Signature(rt, args, None, pysig=self.pysig)
|
| 629 |
+
else:
|
| 630 |
+
item_type = types.undefined
|
| 631 |
+
return types.ListType(item_type)
|
| 632 |
+
|
| 633 |
+
return Typer()
|
| 634 |
+
|
| 635 |
+
|
| 636 |
+
@overload(numba_typeref_ctor)
|
| 637 |
+
def impl_numba_typeref_ctor(cls, *args):
|
| 638 |
+
"""Defines lowering for ``List()`` and ``List(iterable)``.
|
| 639 |
+
|
| 640 |
+
This defines the lowering logic to instantiate either an empty typed-list
|
| 641 |
+
or a typed-list initialised with values from a single iterable argument.
|
| 642 |
+
|
| 643 |
+
Parameters
|
| 644 |
+
----------
|
| 645 |
+
cls : TypeRef
|
| 646 |
+
Expecting a TypeRef of a precise ListType.
|
| 647 |
+
args: tuple
|
| 648 |
+
A tuple that contains a single iterable (optional)
|
| 649 |
+
|
| 650 |
+
Returns
|
| 651 |
+
-------
|
| 652 |
+
impl : function
|
| 653 |
+
An implementation suitable for lowering the constructor call.
|
| 654 |
+
|
| 655 |
+
See also: `redirect_type_ctor` in numba/cpython/bulitins.py
|
| 656 |
+
"""
|
| 657 |
+
list_ty = cls.instance_type
|
| 658 |
+
if not isinstance(list_ty, types.ListType):
|
| 659 |
+
return # reject
|
| 660 |
+
# Ensure the list is precisely typed.
|
| 661 |
+
if not list_ty.is_precise():
|
| 662 |
+
msg = "expecting a precise ListType but got {}".format(list_ty)
|
| 663 |
+
raise LoweringError(msg)
|
| 664 |
+
|
| 665 |
+
item_type = types.TypeRef(list_ty.item_type)
|
| 666 |
+
if args:
|
| 667 |
+
# special case 0d Numpy arrays
|
| 668 |
+
if isinstance(args[0], types.Array) and args[0].ndim == 0:
|
| 669 |
+
def impl(cls, *args):
|
| 670 |
+
# Instantiate an empty list and populate it with the single
|
| 671 |
+
# value from the array.
|
| 672 |
+
r = List.empty_list(item_type)
|
| 673 |
+
r.append(args[0].item())
|
| 674 |
+
return r
|
| 675 |
+
else:
|
| 676 |
+
def impl(cls, *args):
|
| 677 |
+
# Instantiate an empty list and populate it with values from
|
| 678 |
+
# the iterable.
|
| 679 |
+
r = List.empty_list(item_type)
|
| 680 |
+
for i in args[0]:
|
| 681 |
+
r.append(i)
|
| 682 |
+
return r
|
| 683 |
+
else:
|
| 684 |
+
def impl(cls, *args):
|
| 685 |
+
# Simply call .empty_list with the item type from *cls*
|
| 686 |
+
return List.empty_list(item_type)
|
| 687 |
+
|
| 688 |
+
return impl
|
lib/python3.10/site-packages/tf2onnx/__init__.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
"""tf2onnx package."""
|
| 4 |
+
|
| 5 |
+
from __future__ import division
|
| 6 |
+
from __future__ import print_function
|
| 7 |
+
from __future__ import unicode_literals
|
| 8 |
+
|
| 9 |
+
__all__ = ["utils", "graph_matcher", "graph", "graph_builder",
|
| 10 |
+
"tfonnx", "shape_inference", "schemas", "tf_utils", "tf_loader", "convert"]
|
| 11 |
+
|
| 12 |
+
import onnx
|
| 13 |
+
from .version import version as __version__
|
| 14 |
+
from . import verbose_logging as logging
|
| 15 |
+
from tf2onnx import tfonnx, utils, graph, graph_builder, graph_matcher, shape_inference, schemas, convert # pylint: disable=wrong-import-order
|
| 16 |
+
#from tf2onnx import tf_utils, tf_loader
|
lib/python3.10/site-packages/tf2onnx/constants.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0


"""
common constants
"""

from onnx import helper

# Root package name ("tf2onnx"); used e.g. to name the package logger.
TF2ONNX_PACKAGE_NAME = __name__.split('.')[0]

# Built-in supported domains
ONNX_DOMAIN = ""
AI_ONNX_ML_DOMAIN = "ai.onnx.ml"
MICROSOFT_DOMAIN = "com.microsoft"
CONTRIB_OPS_DOMAIN = "ai.onnx.contrib"

# Default opset version for onnx domain
PREFERRED_OPSET = 9

# Default opset for custom ops
TENSORFLOW_OPSET = helper.make_opsetid("ai.onnx.converters.tensorflow", 1)

# Target for the generated onnx graph. Possible targets:
# onnx-1.1 = onnx at v1.1 (winml in rs4 is based on this)
# caffe2 = include some workarounds for caffe2 and winml
TARGET_RS4 = "rs4"
TARGET_RS5 = "rs5"
TARGET_RS6 = "rs6"
TARGET_CAFFE2 = "caffe2"
TARGET_TENSORRT = "tensorrt"
POSSIBLE_TARGETS = [TARGET_RS4, TARGET_RS5, TARGET_RS6, TARGET_CAFFE2, TARGET_TENSORRT]
DEFAULT_TARGET = []

# Axis permutations between tensor layouts
# (N=batch, C=channels, H/W/D=spatial dims).
NCHW_TO_NHWC = [0, 2, 3, 1]
NHWC_TO_NCHW = [0, 3, 1, 2]
NDHWC_TO_NCDHW = [0, 4, 1, 2, 3]
NCDHW_TO_NDHWC = [0, 2, 3, 4, 1]
HWCN_TO_NCHW = [3, 2, 0, 1]
NCHW_TO_HWCN = [2, 3, 1, 0]

# Environment variables
ENV_TF2ONNX_DEBUG_MODE = "TF2ONNX_DEBUG_MODE"

# Mapping opset to IR version.
# Note: opset 7 and opset 8 came out with IR3 but we need IR4 because of PlaceholderWithDefault
OPSET_TO_IR_VERSION = {
    1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3, 7: 4, 8: 4, 9: 4, 10: 5, 11: 6, 12: 7, 13: 7
}
|
lib/python3.10/site-packages/tf2onnx/convert.py
ADDED
|
@@ -0,0 +1,467 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
python -m tf2onnx.convert : api and commandline tool to convert a tensorflow model to onnx
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
from __future__ import unicode_literals
|
| 11 |
+
|
| 12 |
+
# pylint: disable=unused-argument,unused-import,ungrouped-imports,wrong-import-position
|
| 13 |
+
|
| 14 |
+
import argparse
|
| 15 |
+
import os
|
| 16 |
+
import sys
|
| 17 |
+
from distutils.version import LooseVersion
|
| 18 |
+
|
| 19 |
+
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "3"
|
| 20 |
+
|
| 21 |
+
import tensorflow as tf
|
| 22 |
+
|
| 23 |
+
from tf2onnx.tfonnx import process_tf_graph
|
| 24 |
+
from tf2onnx import constants, logging, utils, optimizer
|
| 25 |
+
from tf2onnx import tf_loader
|
| 26 |
+
from tf2onnx.graph import ExternalTensorStorage
|
| 27 |
+
from tf2onnx.tf_utils import compress_graph_def
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
# pylint: disable=unused-argument
|
| 32 |
+
|
| 33 |
+
_HELP_TEXT = """
|
| 34 |
+
Usage Examples:
|
| 35 |
+
|
| 36 |
+
python -m tf2onnx.convert --saved-model saved_model_dir --output model.onnx
|
| 37 |
+
python -m tf2onnx.convert --input frozen_graph.pb --inputs X:0 --outputs output:0 --output model.onnx
|
| 38 |
+
python -m tf2onnx.convert --checkpoint checkpoint.meta --inputs X:0 --outputs output:0 --output model.onnx
|
| 39 |
+
|
| 40 |
+
For help and additional information see:
|
| 41 |
+
https://github.com/onnx/tensorflow-onnx
|
| 42 |
+
|
| 43 |
+
If you run into issues, open an issue here:
|
| 44 |
+
https://github.com/onnx/tensorflow-onnx/issues
|
| 45 |
+
"""
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def get_args():
    """Parse commandline.

    Returns the argparse namespace with several fields post-processed:
    comma-separated string options are split into lists, ``--input`` is
    folded into ``--graphdef``, and ``shape_override`` is derived from
    ``--inputs``. Exits (via ``parser.error``/``sys.exit``) on invalid
    combinations.
    """
    parser = argparse.ArgumentParser(description="Convert tensorflow graphs to ONNX.",
                                     formatter_class=argparse.RawDescriptionHelpFormatter, epilog=_HELP_TEXT)
    parser.add_argument("--input", help="input from graphdef")
    parser.add_argument("--graphdef", help="input from graphdef")
    parser.add_argument("--saved-model", help="input from saved model")
    parser.add_argument("--tag", help="tag to use for saved_model")
    parser.add_argument("--signature_def", help="signature_def from saved_model to use")
    parser.add_argument("--concrete_function", type=int, default=None,
                        help="For TF2.x saved_model, index of func signature in __call__ (--signature_def is ignored)")
    parser.add_argument("--checkpoint", help="input from checkpoint")
    parser.add_argument("--keras", help="input from keras model")
    parser.add_argument("--tflite", help="input from tflite model")
    parser.add_argument("--large_model", help="use the large model format (for models > 2GB)", action="store_true")
    parser.add_argument("--output", help="output model file")
    parser.add_argument("--inputs", help="model input_names (optional for saved_model, keras, and tflite)")
    parser.add_argument("--outputs", help="model output_names (optional for saved_model, keras, and tflite)")
    parser.add_argument("--ignore_default", help="comma-separated list of names of PlaceholderWithDefault "
                                                 "ops to change into Placeholder ops")
    parser.add_argument("--use_default", help="comma-separated list of names of PlaceholderWithDefault ops to "
                                              "change into Identity ops using their default value")
    parser.add_argument("--rename-inputs", help="input names to use in final model (optional)")
    parser.add_argument("--rename-outputs", help="output names to use in final model (optional)")
    parser.add_argument("--opset", type=int, default=None, help="opset version to use for onnx domain")
    parser.add_argument("--dequantize", help="Remove quantization from model. Only supported for tflite currently.",
                        action="store_true")
    parser.add_argument("--custom-ops", help="comma-separated map of custom ops to domains in format OpName:domain")
    parser.add_argument("--extra_opset", default=None,
                        help="extra opset with format like domain:version, e.g. com.microsoft:1")
    parser.add_argument("--target", default=",".join(constants.DEFAULT_TARGET), choices=constants.POSSIBLE_TARGETS,
                        help="target platform")
    parser.add_argument("--continue_on_error", help="continue_on_error", action="store_true")
    parser.add_argument("--verbose", "-v", help="verbose output, option is additive", action="count")
    parser.add_argument("--debug", help="debug mode", action="store_true")
    parser.add_argument("--output_frozen_graph", help="output frozen tf graph to file")
    parser.add_argument("--fold_const", help="Deprecated. Constant folding is always enabled.",
                        action="store_true")
    # experimental
    parser.add_argument("--inputs-as-nchw", help="transpose inputs as from nhwc to nchw")
    args = parser.parse_args()

    args.shape_override = None
    if args.input:
        # --input is an alias of --graphdef, kept for backward compatibility
        args.graphdef = args.input
    if args.graphdef or args.checkpoint:
        # these formats carry no signature info, so inputs/outputs are mandatory
        if not args.inputs or not args.outputs:
            parser.error("graphdef and checkpoint models need to provide inputs and outputs")
    if not any([args.graphdef, args.checkpoint, args.saved_model, args.keras, args.tflite]):
        parser.print_help()
        sys.exit(1)
    if args.inputs:
        # names may carry shape suffixes; split them out into shape_override
        args.inputs, args.shape_override = utils.split_nodename_and_shape(args.inputs)
    if args.outputs:
        args.outputs = args.outputs.split(",")
    if args.ignore_default:
        args.ignore_default = args.ignore_default.split(",")
    if args.use_default:
        args.use_default = args.use_default.split(",")
    if args.rename_outputs:
        args.rename_outputs = args.rename_outputs.split(",")
    if args.rename_inputs:
        args.rename_inputs = args.rename_inputs.split(",")
    if args.inputs_as_nchw:
        args.inputs_as_nchw = args.inputs_as_nchw.split(",")
    if args.target:
        args.target = args.target.split(",")
    if args.signature_def:
        # downstream loader expects a list of signature names
        args.signature_def = [args.signature_def]
    if args.dequantize:
        if not args.tflite:
            parser.error("dequantize flag is currently only supported for tflite")
    if args.extra_opset:
        tokens = args.extra_opset.split(':')
        if len(tokens) != 2:
            parser.error("invalid extra_opset argument")
        args.extra_opset = [utils.make_opsetid(tokens[0], int(tokens[1]))]

    return args
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def make_default_custom_op_handler(domain):
    """Return a custom-op handler that tags nodes with *domain*.

    The produced handler ignores the conversion context and extra args:
    it only assigns the captured ONNX domain to the node and returns it.
    """
    def default_custom_op_handler(ctx, node, name, args):
        # Stamp the closed-over domain onto the op and pass it through.
        node.domain = domain
        return node

    return default_custom_op_handler
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
def _convert_common(frozen_graph, name="unknown", large_model=False, output_path=None,
                    output_frozen_graph=None, **kwargs):
    """Common processing for conversion.

    Converts a frozen TF GraphDef to an ONNX model proto inside a fresh
    tf.Graph context, optionally writing it to *output_path*.

    Args:
        frozen_graph: the frozen TF GraphDef to convert.
        name: label embedded in the model's "converted from" docstring.
        large_model: if True, use the ONNX external tensor storage format.
        output_path: if set, save the model (zip for large models).
        output_frozen_graph: if set, also save the frozen TF graph here.
        **kwargs: forwarded to process_tf_graph (e.g. opset, input_names,
            output_names, tflite_path, ...).

    Returns:
        (model_proto, external_tensor_storage); the storage is None unless
        large_model is True.
    """

    model_proto = None
    external_tensor_storage = None
    const_node_values = None

    with tf.Graph().as_default() as tf_graph:
        if large_model:
            # Strip big constants out of the graphdef and keep them aside so
            # the proto stays under the 2GB protobuf limit.
            const_node_values = compress_graph_def(frozen_graph)
            external_tensor_storage = ExternalTensorStorage()
        if output_frozen_graph:
            utils.save_protobuf(output_frozen_graph, frozen_graph)
        if not kwargs.get("tflite_path"):
            # tflite conversion reads the flatbuffer directly; only import
            # the graphdef for regular TF input.
            tf.import_graph_def(frozen_graph, name='')
        g = process_tf_graph(tf_graph, const_node_values=const_node_values, **kwargs)
        onnx_graph = optimizer.optimize_graph(g)
        model_proto = onnx_graph.make_model("converted from {}".format(name),
                                            external_tensor_storage=external_tensor_storage)
    if output_path:
        if large_model:
            utils.save_onnx_zip(output_path, model_proto, external_tensor_storage)
        else:
            utils.save_protobuf(output_path, model_proto)

    return model_proto, external_tensor_storage
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
def main():
    """Commandline entry point: load a TF model, convert it, report results."""
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    extra_opset = args.extra_opset or []
    tflite_path = None
    custom_ops = {}
    initialized_tables = None
    tensors_to_rename = {}
    if args.custom_ops:
        # Build a handler per custom op; ops without an explicit domain go
        # into the tensorflow converter opset.
        using_tf_opset = False
        for op in args.custom_ops.split(","):
            if ":" in op:
                op, domain = op.split(":")
            else:
                # default custom ops for tensorflow-onnx are in the "tf" namespace
                using_tf_opset = True
                domain = constants.TENSORFLOW_OPSET.domain
            custom_ops[op] = (make_default_custom_op_handler(domain), [])
        if using_tf_opset:
            extra_opset.append(constants.TENSORFLOW_OPSET)

    if any(opset.domain == constants.CONTRIB_OPS_DOMAIN for opset in extra_opset):
        try:
            import tensorflow_text  # pylint: disable=import-outside-toplevel
        except ModuleNotFoundError:
            logger.warning("tensorflow_text not installed. Model will fail to load if tensorflow_text ops are used.")

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    graph_def = None
    inputs = None
    outputs = None
    model_path = None

    if args.graphdef:
        graph_def, inputs, outputs = tf_loader.from_graphdef(args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = tf_loader.from_checkpoint(args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs, initialized_tables, tensors_to_rename = tf_loader.from_saved_model(
            args.saved_model, args.inputs, args.outputs, args.tag, args.signature_def, args.concrete_function,
            args.large_model, return_initialized_tables=True, return_tensors_to_rename=True)
        model_path = args.saved_model
    if args.keras:
        graph_def, inputs, outputs = tf_loader.from_keras(
            args.keras, args.inputs, args.outputs)
        model_path = args.keras
    if args.tflite:
        # Optional, but used to cut graph if provided.
        inputs = args.inputs
        outputs = args.outputs
        tflite_path = args.tflite
        model_path = tflite_path

    if args.verbose:
        logger.info("inputs: %s", inputs)
        logger.info("outputs: %s", outputs)

    if args.rename_inputs:
        tensors_to_rename.update(zip(inputs, args.rename_inputs))
    if args.rename_outputs:
        tensors_to_rename.update(zip(outputs, args.rename_outputs))

    with tf.device("/cpu:0"):
        model_proto, _ = _convert_common(
            graph_def,
            name=model_path,
            continue_on_error=args.continue_on_error,
            target=args.target,
            opset=args.opset,
            custom_op_handlers=custom_ops,
            extra_opset=extra_opset,
            shape_override=args.shape_override,
            input_names=inputs,
            output_names=outputs,
            inputs_as_nchw=args.inputs_as_nchw,
            large_model=args.large_model,
            tensors_to_rename=tensors_to_rename,
            ignore_default=args.ignore_default,
            use_default=args.use_default,
            tflite_path=tflite_path,
            dequantize=args.dequantize,
            initialized_tables=initialized_tables,
            output_frozen_graph=args.output_frozen_graph,
            output_path=args.output)


    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX", model_path)

    logger.info("Model inputs: %s", [n.name for n in model_proto.graph.input])
    logger.info("Model outputs: %s", [n.name for n in model_proto.graph.output])
    if args.output:
        if args.large_model:
            logger.info("Zipped ONNX model is saved at %s. Unzip before opening in onnxruntime.", args.output)
        else:
            logger.info("ONNX model is saved at %s", args.output)
    else:
        logger.info("To export ONNX model to file, please run with `--output` option")
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def tensor_names_from_structed(concrete_func, input_names, output_names):
    """Map graph tensor names to the structured-signature names of *concrete_func*.

    Inputs are paired positionally with the TensorSpec names from the
    structured input signature (positional specs first, then keyword names
    in sorted order). Outputs are mapped only when the structured outputs
    form a dict. ``output_names`` itself is not consulted here.
    """
    sig_args, sig_kwargs = concrete_func.structured_input_signature
    structured = [spec.name for spec in sig_args if isinstance(spec, tf.TensorSpec)]
    structured += sorted(sig_kwargs.keys())
    rename = dict(zip(input_names, structured))
    outs = concrete_func.structured_outputs
    if isinstance(outs, dict):
        rename.update((tensor.name, key) for key, tensor in outs.items())
    return rename
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
def from_keras(model, input_signature=None, opset=None, custom_ops=None, custom_op_handlers=None,
               custom_rewriter=None, inputs_as_nchw=None, extra_opset=None, shape_override=None,
               target=None, large_model=False, output_path=None):
    """Returns a ONNX model_proto for a tf.keras model.

    Args:
        model: the tf.keras model we want to convert
        input_signature: a tf.TensorSpec or a numpy array defining the shape/dtype of the input
        opset: the opset to be used for the ONNX model, default is the latest
        target: list of workarounds applied to help certain platforms
        custom_op_handlers: dictionary of custom ops handlers
        custom_rewriter: list of custom graph rewriters
        extra_opset: list of extra opset's, for example the opset's used by custom ops
        shape_override: dict with inputs that override the shapes given by tensorflow
        inputs_as_nchw: transpose inputs in list from nchw to nhwc
        large_model: use the ONNX external tensor storage format
        output_path: save model to output_path

    Returns:
        An ONNX model_proto and an external_tensor_storage dict.

    Raises:
        NotImplementedError: on tensorflow older than 2.0.
        ValueError: if input_signature is missing.
    """
    # NOTE(review): `custom_ops` (not `custom_op_handlers`) is forwarded below,
    # and `target`/`custom_rewriter` are not forwarded at all — confirm intended.
    if LooseVersion(tf.__version__) < "2.0":
        raise NotImplementedError("from_keras requires tf-2.0 or newer")

    if not input_signature:
        raise ValueError("from_keras requires input_signature")

    from tensorflow.python.keras.saving import saving_utils as _saving_utils  # pylint: disable=import-outside-toplevel

    # let tensorflow do the checking if model is a valid model
    function = _saving_utils.trace_model_call(model, input_signature)
    concrete_func = function.get_concrete_function(*input_signature)

    # Skip resource tensors (variable handles); they are not model I/O.
    input_names = [input_tensor.name for input_tensor in concrete_func.inputs
                   if input_tensor.dtype != tf.dtypes.resource]
    output_names = [output_tensor.name for output_tensor in concrete_func.outputs
                    if output_tensor.dtype != tf.dtypes.resource]

    initialized_tables = None
    tensors_to_rename = tensor_names_from_structed(concrete_func, input_names, output_names)

    with tf.device("/cpu:0"):
        frozen_graph = tf_loader.from_function(concrete_func, input_names, output_names, large_model=large_model)
        model_proto, external_tensor_storage = _convert_common(
            frozen_graph,
            name=model.name,
            continue_on_error=True,
            target=None,
            opset=opset,
            custom_op_handlers=custom_ops,
            extra_opset=extra_opset,
            shape_override=shape_override,
            input_names=input_names,
            output_names=output_names,
            inputs_as_nchw=inputs_as_nchw,
            large_model=large_model,
            tensors_to_rename=tensors_to_rename,
            initialized_tables=initialized_tables,
            output_path=output_path)

    return model_proto, external_tensor_storage
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
def from_function(function, input_signature=None, opset=None, custom_ops=None, custom_op_handlers=None,
                  custom_rewriter=None, inputs_as_nchw=None, extra_opset=None, shape_override=None, target=None,
                  large_model=False, output_path=None):
    """Returns a ONNX model_proto for a tf.function.

    Args:
        function: the tf.function we want to convert
        input_signature: a tf.TensorSpec or a numpy array defining the shape/dtype of the input
        opset: the opset to be used for the ONNX model, default is the latest
        target: list of workarounds applied to help certain platforms
        custom_op_handlers: dictionary of custom ops handlers
        custom_rewriter: list of custom graph rewriters
        extra_opset: list of extra opset's, for example the opset's used by custom ops
        shape_override: dict with inputs that override the shapes given by tensorflow
        inputs_as_nchw: transpose inputs in list from nchw to nhwc
        large_model: use the ONNX external tensor storage format
        output_path: save model to output_path

    Returns:
        An ONNX model_proto and an external_tensor_storage dict.

    Raises:
        NotImplementedError: on tensorflow older than 2.0.
        ValueError: if input_signature is missing.
    """
    # NOTE(review): `custom_ops` (not `custom_op_handlers`) is forwarded below,
    # and `target`/`custom_rewriter` are not forwarded at all — confirm intended.
    if LooseVersion(tf.__version__) < "2.0":
        raise NotImplementedError("from_function requires tf-2.0 or newer")

    if not input_signature:
        raise ValueError("from_function requires input_signature")

    concrete_func = function.get_concrete_function(*input_signature)

    # Skip resource tensors (variable handles); they are not model I/O.
    input_names = [input_tensor.name for input_tensor in concrete_func.inputs
                   if input_tensor.dtype != tf.dtypes.resource]
    output_names = [output_tensor.name for output_tensor in concrete_func.outputs
                    if output_tensor.dtype != tf.dtypes.resource]

    initialized_tables = None
    tensors_to_rename = tensor_names_from_structed(concrete_func, input_names, output_names)

    with tf.device("/cpu:0"):
        frozen_graph = tf_loader.from_function(concrete_func, input_names, output_names, large_model=large_model)
        model_proto, external_tensor_storage = _convert_common(
            frozen_graph,
            name=concrete_func.name,
            continue_on_error=True,
            target=None,
            opset=opset,
            custom_op_handlers=custom_ops,
            extra_opset=extra_opset,
            shape_override=shape_override,
            input_names=input_names,
            output_names=output_names,
            inputs_as_nchw=inputs_as_nchw,
            large_model=large_model,
            tensors_to_rename=tensors_to_rename,
            initialized_tables=initialized_tables,
            output_path=output_path)

    return model_proto, external_tensor_storage
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
def from_graph_def(graph_def, name=None, input_names=None, output_names=None, opset=None, custom_ops=None,
                   custom_op_handlers=None, custom_rewriter=None, inputs_as_nchw=None, extra_opset=None,
                   shape_override=None, target=None, large_model=False, tensors_to_rename=None, output_path=None):
    """Returns a ONNX model_proto for a tensorflow graphdef.

    Args:
        graph_def: the graphdef we want to convert
        input_names: list of input names
        output_names: list of output names
        name: A name for the graph
        opset: the opset to be used for the ONNX model, default is the latest
        target: list of workarounds applied to help certain platforms
        custom_op_handlers: dictionary of custom ops handlers
        custom_rewriter: list of custom graph rewriters
        extra_opset: list of extra opset's, for example the opset's used by custom ops
        shape_override: dict with inputs that override the shapes given by tensorflow
        inputs_as_nchw: transpose inputs in list from nchw to nhwc
        large_model: use the ONNX external tensor storage format
        output_path: save model to output_path

    Returns:
        An ONNX model_proto and an external_tensor_storage dict.

    Raises:
        ValueError: if input_names or output_names is missing.
    """
    if not input_names:
        raise ValueError("input_names needs to be provided")
    if not output_names:
        raise ValueError("output_names needs to be provided")
    if not name:
        name = "unknown"
    initialized_tables = None

    with tf.device("/cpu:0"):
        with tf.Graph().as_default() as tf_graph:
            with tf_loader.tf_session(graph=tf_graph) as sess:
                tf.import_graph_def(graph_def, name='')
                frozen_graph = tf_loader.freeze_session(sess, input_names=input_names, output_names=output_names)
                input_names = tf_loader.inputs_without_resource(sess, input_names)
                # BUG FIX: optimize the *frozen* graph. The original passed the
                # raw graph_def here, which silently discarded the result of
                # freeze_session (a dead store) and converted an unfrozen graph.
                frozen_graph = tf_loader.tf_optimize(input_names, output_names, frozen_graph)

        model_proto, external_tensor_storage = _convert_common(
            frozen_graph,
            name=name,
            continue_on_error=True,
            target=None,
            opset=opset,
            custom_op_handlers=custom_ops,
            extra_opset=extra_opset,
            shape_override=shape_override,
            input_names=input_names,
            output_names=output_names,
            inputs_as_nchw=inputs_as_nchw,
            large_model=large_model,
            tensors_to_rename=tensors_to_rename,
            initialized_tables=initialized_tables,
            output_path=output_path)

    return model_proto, external_tensor_storage
|
| 464 |
+
|
| 465 |
+
|
| 466 |
+
if __name__ == "__main__":
|
| 467 |
+
main()
|
lib/python3.10/site-packages/tf2onnx/flexbuffers.py
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.flexbuffers - Code for parsing flexbuffers
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import struct
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class FlexbufferParseException(Exception):
    """Raised when a flexbuffer value cannot be decoded (e.g. bad bit size)."""
    pass
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def read_int(buffer, offset, bit_size):
    """Read a little-endian signed integer of width ``1 << bit_size`` bytes."""
    width = 1 << bit_size
    fmt = '<' + 'bhiq'[bit_size]
    (value,) = struct.unpack(fmt, buffer[offset:offset + width])
    return value
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def read_uint(buffer, offset, bit_size):
    """Read a little-endian unsigned integer of width ``1 << bit_size`` bytes."""
    width = 1 << bit_size
    fmt = '<' + 'BHIQ'[bit_size]
    (value,) = struct.unpack(fmt, buffer[offset:offset + width])
    return value
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def read_float(buffer, offset, bit_size):
    """Read a little-endian IEEE float: bit_size 2 -> float32, 3 -> float64."""
    fmt_by_bit_size = {2: ('<f', 4), 3: ('<d', 8)}
    entry = fmt_by_bit_size.get(bit_size)
    if entry is None:
        raise FlexbufferParseException("Invalid bit size for flexbuffer float: %d" % bit_size)
    fmt, width = entry
    return struct.unpack(fmt, buffer[offset:offset + width])[0]
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def read_string(buffer, offset, size, decode_strings):
    """Return *size* bytes at *offset*, UTF-8 decoded when *decode_strings*."""
    raw = buffer[offset:offset + size]
    # Flexbuffer requires all strings to be valid UTF-8 but FlexOps don't always respect this.
    return raw.decode('utf-8') if decode_strings else raw
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def read_indirect(buffer, offset, bit_size):
    """Resolve an indirect reference: the uint at *offset* is a backward jump."""
    back = read_uint(buffer, offset, bit_size)
    return offset - back
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def read_bytes(buffer, offset, size):
    """Return the raw byte slice ``[offset, offset + size)``."""
    end = offset + size
    return buffer[offset:end]
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def read_array(buffer, offset, length, bit_size, packed_type, decode_strings):
    """Decode ``length`` homogeneous items of width ``2**bit_size`` starting at ``offset``."""
    stride = 1 << bit_size
    return [
        read_buffer(buffer, offset + i * stride, bit_size, packed_type, decode_strings)
        for i in range(length)
    ]
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def read_buffer(buffer, offset, parent_bit_size, packed_type, decode_strings):
    """Recursively decode one flexbuffer value into its Python representation.

    ``packed_type`` holds the value type in its upper bits and the byte-width
    exponent of nested payloads in its lowest two bits.
    """
    width_bits = packed_type & 3
    vtype = packed_type >> 2
    width = 1 << width_bits

    if vtype == 0x0:  # null
        return None

    if vtype in (0x1, 0x2, 0x3):  # inline int / uint / float
        reader = (read_int, read_uint, read_float)[vtype - 0x1]
        return reader(buffer, offset, parent_bit_size)

    if vtype == 0x4:  # key: indirect, NUL-terminated string
        start = read_indirect(buffer, offset, parent_bit_size)
        end = start
        while read_int(buffer, end, 0) != 0:
            end += 1
        return read_string(buffer, start, end - start, decode_strings)

    if vtype == 0x5:  # string: length prefix immediately before the data
        start = read_indirect(buffer, offset, parent_bit_size)
        len_bits = width_bits
        len_width = 1 << len_bits
        size = read_uint(buffer, start - len_width, width_bits)
        # If the byte just past the claimed end isn't NUL, the length prefix
        # must actually be wider; retry with doubled prefix width.
        while read_int(buffer, start + size, 0) != 0:
            len_width <<= 1
            len_bits += 1
            size = read_uint(buffer, start - len_width, len_bits)
        return read_string(buffer, start, size, decode_strings)

    if vtype in (0x6, 0x7, 0x8):  # indirect int / uint / float
        reader = (read_int, read_uint, read_float)[vtype - 0x6]
        target = read_indirect(buffer, offset, parent_bit_size)
        return reader(buffer, target, width_bits)

    if vtype == 0x9:  # map
        values_offset = read_indirect(buffer, offset, parent_bit_size)
        length = read_uint(buffer, values_offset - width, width_bits)
        # The keys vector descriptor sits 3 slots before the values.
        keys_offset = values_offset - width * 3
        keys_vector = read_indirect(buffer, keys_offset, width_bits)
        key_width = read_uint(buffer, keys_offset + width, width_bits)
        key_bits = {1: 0, 2: 1, 4: 2, 8: 3, 16: 4}[key_width]
        types_offset = values_offset + length * width
        result = {}
        for i in range(length):
            key = read_buffer(buffer, keys_vector + i * key_width, key_bits,
                              (0x4 << 2) | key_bits, decode_strings)
            item_type = read_uint(buffer, types_offset + i, 0)
            result[key] = read_buffer(buffer, values_offset + i * width, width_bits,
                                      item_type, decode_strings)
        return result

    if vtype == 0xa:  # heterogeneous vector: per-element packed types follow the data
        items_offset = read_indirect(buffer, offset, parent_bit_size)
        length = read_uint(buffer, items_offset - width, width_bits)
        types_offset = items_offset + length * width
        return [
            read_buffer(buffer, items_offset + i * width, width_bits,
                        read_uint(buffer, types_offset + i, 0), decode_strings)
            for i in range(length)
        ]

    if vtype in (0xb, 0xc, 0xd, 0xe, 0xf, 0x24):  # typed vector
        items_offset = read_indirect(buffer, offset, parent_bit_size)
        length = read_uint(buffer, items_offset - width, width_bits)
        elem_type = vtype - 0xb + 0x1
        return read_array(buffer, items_offset, length, width_bits,
                          elem_type << 2, decode_strings)

    if 0x10 <= vtype <= 0x18:  # fixed-length typed vector (length 2-4 encoded in the type)
        length = (vtype - 0x10) // 3 + 2
        elem_type = (vtype - 0x10) % 3 + 1
        items_offset = read_indirect(buffer, offset, parent_bit_size)
        return read_array(buffer, items_offset, length, width_bits,
                          elem_type << 2, decode_strings)

    if vtype == 0x19:  # blob: size prefix then raw bytes
        data_offset = read_indirect(buffer, offset, parent_bit_size)
        size = read_uint(buffer, data_offset - width, width_bits)
        return read_bytes(buffer, data_offset, size)

    if vtype == 0x1a:  # bool
        return read_uint(buffer, offset, parent_bit_size) > 0

    raise FlexbufferParseException("Invalid flexbuffer value type %r" % vtype)
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def read_flexbuffer(buffer, decode_strings=True):
    """Decode a complete flexbuffer.

    The last byte holds the root value's byte width, the byte before it the
    root's packed type; the root value itself precedes those two bytes.
    """
    root_width = read_uint(buffer, len(buffer) - 1, 0)
    root_bits = {1: 0, 2: 1, 4: 2, 8: 3, 16: 4}[root_width]
    root_type = read_uint(buffer, len(buffer) - 2, 0)
    root_offset = len(buffer) - 2 - root_width
    return read_buffer(buffer, root_offset, root_bits, root_type, decode_strings)
|
lib/python3.10/site-packages/tf2onnx/graph.py
ADDED
|
@@ -0,0 +1,1730 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.graph - class to manage graph manipulation on top of onnx
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
from __future__ import unicode_literals
|
| 11 |
+
|
| 12 |
+
import collections
|
| 13 |
+
import copy
|
| 14 |
+
import logging
|
| 15 |
+
import six
|
| 16 |
+
import numpy as np
|
| 17 |
+
|
| 18 |
+
from onnx import helper, numpy_helper, shape_inference, OperatorSetIdProto, AttributeProto, TensorProto
|
| 19 |
+
from tf2onnx import utils, __version__
|
| 20 |
+
from tf2onnx.utils import make_name, port_name, find_opset
|
| 21 |
+
from tf2onnx import optimizer
|
| 22 |
+
from tf2onnx.schemas import get_schema, infer_onnx_shape_dtype
|
| 23 |
+
from tf2onnx import constants
|
| 24 |
+
|
| 25 |
+
logger = logging.getLogger(__name__)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# todo(pengwa): remove protected-access later
|
| 29 |
+
# pylint: disable=broad-except,protected-access
|
| 30 |
+
|
| 31 |
+
class ExternalTensorStorage():
    """Passed into graph and node methods to accumulate tensors to save externally."""

    def __init__(self):
        # Cache of nodes whose "value" attribute was already rewritten to
        # point at external storage, so the rewrite happens only once.
        self.node_to_modified_value_attr = {}
        # Generated external tensor name -> raw byte payload.
        self.name_to_tensor_data = {}
        # Tensors with more elements than this are moved to external storage.
        self.external_tensor_size_threshold = 1024
        # Monotonic counter used to make generated tensor names unique.
        self.name_counter = 0
|
| 38 |
+
|
| 39 |
+
class Node(object):
|
| 40 |
+
"""A Node - wrapper around onnx nodes that we use for graph manipulations."""
|
| 41 |
+
|
| 42 |
+
def __init__(self, node, graph, skip_conversion=False):
|
| 43 |
+
"""Create Node.
|
| 44 |
+
Args:
|
| 45 |
+
node: Onnx node in NodeProto
|
| 46 |
+
graph: Graph() we are part of
|
| 47 |
+
"""
|
| 48 |
+
self._op = node
|
| 49 |
+
self.graph = graph
|
| 50 |
+
self._input = list(node.input)
|
| 51 |
+
self._output = list(node.output)
|
| 52 |
+
self._attr = {}
|
| 53 |
+
|
| 54 |
+
graph.set_node_by_name(self)
|
| 55 |
+
# dict to original attributes
|
| 56 |
+
for a in node.attribute:
|
| 57 |
+
self._attr[a.name] = a
|
| 58 |
+
self._skip_conversion = skip_conversion
|
| 59 |
+
|
| 60 |
+
@property
|
| 61 |
+
def input(self):
|
| 62 |
+
return self._input
|
| 63 |
+
|
| 64 |
+
@input.setter
|
| 65 |
+
def input(self, val):
|
| 66 |
+
# The setter can catch that all inputs are change
|
| 67 |
+
# but it cannot catch that one input is changed.
|
| 68 |
+
# That's method replace_input and replace_inputs must
|
| 69 |
+
# be used to change inputs to let the graph instance
|
| 70 |
+
# update its internal indices.
|
| 71 |
+
self._input = copy.deepcopy(val)
|
| 72 |
+
|
| 73 |
+
@property
|
| 74 |
+
def output(self):
|
| 75 |
+
return self._output
|
| 76 |
+
|
| 77 |
+
@output.setter
|
| 78 |
+
def output(self, val):
|
| 79 |
+
"""Set op output. Output should be updated explicitly,
|
| 80 |
+
changing it would require output mapping changed.
|
| 81 |
+
"""
|
| 82 |
+
self._graph_check()
|
| 83 |
+
for o in self._output:
|
| 84 |
+
del self.graph._output_to_node_name[o]
|
| 85 |
+
|
| 86 |
+
self._output = val.copy()
|
| 87 |
+
for o in self._output:
|
| 88 |
+
utils.make_sure(o not in self.graph._output_to_node_name, "output %s already in output mapping", o)
|
| 89 |
+
self.graph._output_to_node_name[o] = self.name
|
| 90 |
+
|
| 91 |
+
@property
|
| 92 |
+
def inputs(self):
|
| 93 |
+
"""Input node objects."""
|
| 94 |
+
self._graph_check()
|
| 95 |
+
val = [self.graph.get_node_by_output(n) for n in self._input]
|
| 96 |
+
return val
|
| 97 |
+
|
| 98 |
+
@property
|
| 99 |
+
def attr(self):
|
| 100 |
+
return self._attr
|
| 101 |
+
|
| 102 |
+
def get_value_attr(self, external_tensor_storage=None):
|
| 103 |
+
"""Return onnx attr for value property of node.
|
| 104 |
+
Attr is modified to point to external tensor data stored in external_tensor_storage, if included.
|
| 105 |
+
"""
|
| 106 |
+
a = self._attr["value"]
|
| 107 |
+
if external_tensor_storage is not None and self in external_tensor_storage.node_to_modified_value_attr:
|
| 108 |
+
return external_tensor_storage.node_to_modified_value_attr[self]
|
| 109 |
+
if external_tensor_storage is None or a.type != AttributeProto.TENSOR:
|
| 110 |
+
return a
|
| 111 |
+
if np.product(a.t.dims) > external_tensor_storage.external_tensor_size_threshold:
|
| 112 |
+
a = copy.copy(a)
|
| 113 |
+
tensor_name = self.name.strip() + "_" + str(external_tensor_storage.name_counter)
|
| 114 |
+
for c in '~"#%&*:<>?/\\{|}':
|
| 115 |
+
tensor_name = tensor_name.replace(c, '_')
|
| 116 |
+
external_tensor_storage.name_counter += 1
|
| 117 |
+
external_tensor_storage.name_to_tensor_data[tensor_name] = a.t.raw_data
|
| 118 |
+
external_tensor_storage.node_to_modified_value_attr[self] = a
|
| 119 |
+
a.t.raw_data = b''
|
| 120 |
+
location = a.t.external_data.add()
|
| 121 |
+
location.key = "location"
|
| 122 |
+
location.value = tensor_name
|
| 123 |
+
a.t.data_location = TensorProto.EXTERNAL
|
| 124 |
+
return a
|
| 125 |
+
|
| 126 |
+
def get_onnx_attrs(self, external_tensor_storage=None):
|
| 127 |
+
"""Return onnx valid attributes.
|
| 128 |
+
Attrs point to external tensor data stored in external_tensor_storage, if included."""
|
| 129 |
+
schema = get_schema(self.type, self.graph.opset, self.domain)
|
| 130 |
+
if schema is None and not (self.is_const() or self.is_graph_input()):
|
| 131 |
+
logger.debug("Node %s uses non-stardard onnx op <%s, %s>, skip attribute check",
|
| 132 |
+
self.name, self.domain, self.type)
|
| 133 |
+
onnx_attrs = {}
|
| 134 |
+
for a in self._attr.values():
|
| 135 |
+
if a.name == "value":
|
| 136 |
+
onnx_attrs[a.name] = self.get_value_attr(external_tensor_storage)
|
| 137 |
+
elif schema is None or schema.has_attribute(a.name):
|
| 138 |
+
onnx_attrs[a.name] = a
|
| 139 |
+
return onnx_attrs
|
| 140 |
+
|
| 141 |
+
@property
|
| 142 |
+
def name(self):
|
| 143 |
+
return self._op.name
|
| 144 |
+
|
| 145 |
+
def child_name(self):
|
| 146 |
+
return utils.make_name(self.name)
|
| 147 |
+
|
| 148 |
+
@property
|
| 149 |
+
def op(self):
|
| 150 |
+
"""TODO: have a better interface for this."""
|
| 151 |
+
return self._op
|
| 152 |
+
|
| 153 |
+
@property
|
| 154 |
+
def type(self):
|
| 155 |
+
"""Return Op type."""
|
| 156 |
+
return self._op.op_type
|
| 157 |
+
|
| 158 |
+
@type.setter
|
| 159 |
+
def type(self, val):
|
| 160 |
+
"""Set Op type."""
|
| 161 |
+
self._op.op_type = val
|
| 162 |
+
|
| 163 |
+
@property
|
| 164 |
+
def domain(self):
|
| 165 |
+
"""Return Op type."""
|
| 166 |
+
return self._op.domain
|
| 167 |
+
|
| 168 |
+
@domain.setter
|
| 169 |
+
def domain(self, val):
|
| 170 |
+
"""Set Op type."""
|
| 171 |
+
self._op.domain = val
|
| 172 |
+
|
| 173 |
+
@property
|
| 174 |
+
def data_format(self):
|
| 175 |
+
"""Return data_format."""
|
| 176 |
+
attr_str = self.get_attr_value("data_format")
|
| 177 |
+
return "unkown" if attr_str is None else attr_str.decode("utf-8")
|
| 178 |
+
|
| 179 |
+
@data_format.setter
|
| 180 |
+
def data_format(self, val):
|
| 181 |
+
"""Set data_format."""
|
| 182 |
+
self.set_attr("data_format", val)
|
| 183 |
+
|
| 184 |
+
def is_nhwc(self):
|
| 185 |
+
"""Return True if node is in NHWC format."""
|
| 186 |
+
utils.make_sure('D' not in self.data_format, "is_nhwc called on %s with spatial=2 but data_format=%s",
|
| 187 |
+
self.name, self.data_format)
|
| 188 |
+
return self.data_format == "NHWC"
|
| 189 |
+
|
| 190 |
+
def is_const(self):
|
| 191 |
+
"""Return True if node is a constant."""
|
| 192 |
+
return self.type in ["Const", "ConstV2"]
|
| 193 |
+
|
| 194 |
+
def is_scalar(self):
|
| 195 |
+
"""Return True if node is a constant with a scalar value."""
|
| 196 |
+
if not self.is_const():
|
| 197 |
+
return False
|
| 198 |
+
t = self.get_attr("value", default=None)
|
| 199 |
+
if t is None:
|
| 200 |
+
return False
|
| 201 |
+
t = numpy_helper.to_array(helper.get_attribute_value(t))
|
| 202 |
+
return t.shape == tuple()
|
| 203 |
+
|
| 204 |
+
def is_graph_input(self):
|
| 205 |
+
return self.type in ["Placeholder", "PlaceholderWithDefault", "PlaceholderV2"]
|
| 206 |
+
|
| 207 |
+
def is_graph_input_default_const(self):
|
| 208 |
+
return self.is_const() and any(
|
| 209 |
+
out.is_graph_input() for out in self.graph.find_output_consumers(self.output[0])
|
| 210 |
+
)
|
| 211 |
+
|
| 212 |
+
def is_while(self):
|
| 213 |
+
return self.type in ["While", "StatelessWhile", "Loop"]
|
| 214 |
+
|
| 215 |
+
def __str__(self):
|
| 216 |
+
return str(self._op)
|
| 217 |
+
|
| 218 |
+
def __repr__(self):
|
| 219 |
+
return "<onnx op type='%s' name=%s>" % (self.type, self._op.name)
|
| 220 |
+
|
| 221 |
+
@property
|
| 222 |
+
def summary(self):
|
| 223 |
+
"""Return node summary information."""
|
| 224 |
+
lines = []
|
| 225 |
+
lines.append("OP={}".format(self.type))
|
| 226 |
+
lines.append("Name={}".format(self.name))
|
| 227 |
+
|
| 228 |
+
g = self.graph
|
| 229 |
+
if self.input:
|
| 230 |
+
lines.append("Inputs:")
|
| 231 |
+
for name in self.input:
|
| 232 |
+
node = g.get_node_by_output(name)
|
| 233 |
+
op = node.type if node else "N/A"
|
| 234 |
+
lines.append("\t{}={}, {}, {}".format(name, op, g.get_shape(name), g.get_dtype(name)))
|
| 235 |
+
|
| 236 |
+
if self.output:
|
| 237 |
+
for name in self.output:
|
| 238 |
+
lines.append("Outpus:")
|
| 239 |
+
lines.append("\t{}={}, {}".format(name, g.get_shape(name), g.get_dtype(name)))
|
| 240 |
+
|
| 241 |
+
return '\n'.join(lines)
|
| 242 |
+
|
| 243 |
+
def get_attr(self, name, default=None):
|
| 244 |
+
"""Get raw attribute value."""
|
| 245 |
+
attr = self.attr.get(name, default)
|
| 246 |
+
return attr
|
| 247 |
+
|
| 248 |
+
def get_attr_value(self, name, default=None):
|
| 249 |
+
attr = self.get_attr(name)
|
| 250 |
+
if attr:
|
| 251 |
+
return helper.get_attribute_value(attr)
|
| 252 |
+
return default
|
| 253 |
+
|
| 254 |
+
def get_attr_int(self, name):
|
| 255 |
+
"""Get attribute value as int."""
|
| 256 |
+
attr_int = self.get_attr_value(name)
|
| 257 |
+
utils.make_sure(
|
| 258 |
+
attr_int is not None and isinstance(attr_int, int),
|
| 259 |
+
"attribute %s is None", name
|
| 260 |
+
)
|
| 261 |
+
return attr_int
|
| 262 |
+
|
| 263 |
+
def get_attr_str(self, name, encoding="utf-8"):
|
| 264 |
+
"""Get attribute value as string."""
|
| 265 |
+
attr_str = self.get_attr_value(name)
|
| 266 |
+
utils.make_sure(
|
| 267 |
+
attr_str is not None and isinstance(attr_str, bytes),
|
| 268 |
+
"attribute %s is None", name
|
| 269 |
+
)
|
| 270 |
+
return attr_str.decode(encoding)
|
| 271 |
+
|
| 272 |
+
def set_attr(self, name, value):
|
| 273 |
+
self.attr[name] = helper.make_attribute(name, value)
|
| 274 |
+
|
| 275 |
+
def set_attr_onnx(self, value):
|
| 276 |
+
self.attr[value.name] = value
|
| 277 |
+
|
| 278 |
+
@property
|
| 279 |
+
def skip_conversion(self):
|
| 280 |
+
return self._skip_conversion
|
| 281 |
+
|
| 282 |
+
@skip_conversion.setter
|
| 283 |
+
def skip_conversion(self, val):
|
| 284 |
+
self._skip_conversion = val
|
| 285 |
+
|
| 286 |
+
# If some Node is created as onnx_node, then we don't need convert it
|
| 287 |
+
def need_skip(self):
|
| 288 |
+
return self._skip_conversion
|
| 289 |
+
|
| 290 |
+
@property
|
| 291 |
+
def output_shapes(self):
|
| 292 |
+
"""Get output shapes."""
|
| 293 |
+
self._graph_check()
|
| 294 |
+
val = [self.graph.get_shape(n) for n in self._output]
|
| 295 |
+
return val
|
| 296 |
+
|
| 297 |
+
@property
|
| 298 |
+
def output_dtypes(self):
|
| 299 |
+
"""Get output dtypes."""
|
| 300 |
+
self._graph_check()
|
| 301 |
+
val = [self.graph.get_dtype(n) for n in self._output]
|
| 302 |
+
return val
|
| 303 |
+
|
| 304 |
+
def get_tensor_value(self, as_list=True):
|
| 305 |
+
"""Get value for onnx tensor.
|
| 306 |
+
Args:
|
| 307 |
+
as_list: whether return numpy ndarray in list.
|
| 308 |
+
Returns:
|
| 309 |
+
If as_list=True, return the array as a (possibly nested) list.
|
| 310 |
+
Otherwise, return data of type np.ndarray.
|
| 311 |
+
|
| 312 |
+
If a tensor is a scalar having value 1,
|
| 313 |
+
when as_list=False, return np.array(1), type is <class 'numpy.ndarray'>
|
| 314 |
+
when as_list=True, return 1, type is <class 'int'>.
|
| 315 |
+
"""
|
| 316 |
+
if not self.is_const():
|
| 317 |
+
raise ValueError("get tensor value: '{}' must be Const".format(self.name))
|
| 318 |
+
|
| 319 |
+
t = self.get_attr("value")
|
| 320 |
+
if t:
|
| 321 |
+
t = numpy_helper.to_array(helper.get_attribute_value(t))
|
| 322 |
+
if as_list is True:
|
| 323 |
+
t = t.tolist() # t might be scalar after tolist()
|
| 324 |
+
return t
|
| 325 |
+
|
| 326 |
+
def scalar_to_dim1(self):
|
| 327 |
+
"""Get value for onnx tensor."""
|
| 328 |
+
if not self.is_const():
|
| 329 |
+
raise ValueError("get tensor value: {} must be Const".format(self.name))
|
| 330 |
+
|
| 331 |
+
t = self.get_attr("value")
|
| 332 |
+
if t:
|
| 333 |
+
t = helper.get_attribute_value(t)
|
| 334 |
+
if not t.dims:
|
| 335 |
+
t.dims.extend([1])
|
| 336 |
+
return t.dims
|
| 337 |
+
|
| 338 |
+
def set_tensor_value(self, new_val):
|
| 339 |
+
"""Set new value for existing onnx tensor.
|
| 340 |
+
Args:
|
| 341 |
+
new_val: value of type numpy ndarray
|
| 342 |
+
"""
|
| 343 |
+
if not self.is_const():
|
| 344 |
+
raise ValueError("set tensor value: {} must be Const".format(self.name))
|
| 345 |
+
t = self.get_attr("value")
|
| 346 |
+
if not t:
|
| 347 |
+
raise ValueError("set tensor value: {} is None".format(self.name))
|
| 348 |
+
t = helper.get_attribute_value(t)
|
| 349 |
+
onnx_tensor = numpy_helper.from_array(new_val, t.name)
|
| 350 |
+
del t
|
| 351 |
+
self.set_attr("value", onnx_tensor)
|
| 352 |
+
# track shapes in _output_shapes
|
| 353 |
+
self._graph_check()
|
| 354 |
+
self.graph.set_shape(onnx_tensor.name, list(onnx_tensor.dims))
|
| 355 |
+
|
| 356 |
+
def get_body_graphs(self):
|
| 357 |
+
self._graph_check()
|
| 358 |
+
return self.graph.contained_graphs.get(self.name, None)
|
| 359 |
+
|
| 360 |
+
def set_body_graph_as_attr(self, attr_name, graph):
|
| 361 |
+
self._graph_check()
|
| 362 |
+
if self.name not in self.graph.contained_graphs:
|
| 363 |
+
self.graph.contained_graphs[self.name] = {}
|
| 364 |
+
|
| 365 |
+
self.graph.contained_graphs[self.name].update({attr_name: graph})
|
| 366 |
+
graph.parent_graph = self.graph
|
| 367 |
+
|
| 368 |
+
def update_proto(self, external_tensor_storage=None):
|
| 369 |
+
"""Update protobuf from internal structure."""
|
| 370 |
+
nodes = list(self._op.input)
|
| 371 |
+
for node in nodes:
|
| 372 |
+
self._op.input.remove(node)
|
| 373 |
+
self._op.input.extend(self.input)
|
| 374 |
+
nodes = list(self._op.output)
|
| 375 |
+
for node in nodes:
|
| 376 |
+
self._op.output.remove(node)
|
| 377 |
+
self._op.output.extend(self.output)
|
| 378 |
+
|
| 379 |
+
# update attributes to proto
|
| 380 |
+
del self._op.attribute[:]
|
| 381 |
+
|
| 382 |
+
# check attribute of type GraphProto
|
| 383 |
+
attr_graphs = self.get_body_graphs()
|
| 384 |
+
if attr_graphs:
|
| 385 |
+
for attr_name, sub_graph in attr_graphs.items():
|
| 386 |
+
graph_proto = sub_graph.make_graph("graph for " + self.name + " " + attr_name,
|
| 387 |
+
external_tensor_storage=external_tensor_storage)
|
| 388 |
+
self.set_attr(attr_name, graph_proto)
|
| 389 |
+
|
| 390 |
+
attr = list(self.get_onnx_attrs(external_tensor_storage).values())
|
| 391 |
+
if attr:
|
| 392 |
+
self._op.attribute.extend(attr)
|
| 393 |
+
|
| 394 |
+
def get_implicit_inputs(self, recursive=True):
|
| 395 |
+
"""Get implicit inputs if the node has attributes being GraphProto."""
|
| 396 |
+
output_available_in_cur_graph = set()
|
| 397 |
+
all_node_inputs = set()
|
| 398 |
+
|
| 399 |
+
graphs = []
|
| 400 |
+
body_graphs = self.get_body_graphs()
|
| 401 |
+
if body_graphs:
|
| 402 |
+
graphs.extend(body_graphs.values())
|
| 403 |
+
|
| 404 |
+
while graphs:
|
| 405 |
+
graph = graphs.pop()
|
| 406 |
+
for n in graph.get_nodes():
|
| 407 |
+
output_available_in_cur_graph |= set(n.output)
|
| 408 |
+
for i in n.input:
|
| 409 |
+
all_node_inputs.add(i)
|
| 410 |
+
|
| 411 |
+
if recursive:
|
| 412 |
+
b_graphs = n.get_body_graphs()
|
| 413 |
+
if b_graphs:
|
| 414 |
+
graphs.extend(b_graphs.values())
|
| 415 |
+
|
| 416 |
+
outer_scope_node_input_ids = all_node_inputs - output_available_in_cur_graph
|
| 417 |
+
return list(outer_scope_node_input_ids)
|
| 418 |
+
|
| 419 |
+
def _graph_check(self):
    """Assert this node is attached to a graph; raises via utils.make_sure otherwise."""
    utils.make_sure(self.graph is not None, "Node %s not belonging any graph",
                    self.name)
|
| 423 |
+
def maybe_cast_input(self, supported, type_map):
    """Insert Cast nodes in front of inputs whose dtype is not supported.

    Args:
        supported: list of supported types for inputs (indexed per input)
        type_map: dict type to supported type mapping

    Returns:
        True if at least one Cast node was inserted.

    Raises:
        RuntimeError: when an unsupported dtype has no entry in type_map.
    """
    did_cast = False
    for i, name in enumerate(self.input):
        dtype = self.graph.get_dtype(name)
        if dtype not in supported[i]:
            tdtype = type_map.get(dtype)
            if tdtype is None:
                raise RuntimeError("don't know how to cast type {} on node {}".format(dtype, name))
            shape = self.graph.get_shape(name)
            cast_node = self.graph.insert_new_node_on_input(
                self, "Cast", name, to=tdtype)
            # BUG FIX: set_dtype stores a single dtype per tensor everywhere
            # else in this module (see make_const/append_node); wrapping it
            # in a list made subsequent get_dtype() comparisons fail.
            self.graph.set_dtype(cast_node.output[0], tdtype)
            self.graph.set_shape(cast_node.output[0], shape)
            did_cast = True
    return did_cast
| 444 |
+
|
| 445 |
+
class Graph(object):
|
| 446 |
+
""""Class that provides graph manipulation and matching."""
|
| 447 |
+
|
| 448 |
+
def __init__(self, nodes, output_shapes=None, dtypes=None, target=None, opset=None, extra_opset=None,
             input_names=None, output_names=None, is_subgraph=False, graph_name=None):
    """Create Graph.
    Args:
        nodes: list of Node()
        output_shapes: dict of tensorflow output shapes
        dtypes: dict of tensorflow dtype
        target: iterable of target-platform names (stored as a set)
        opset: onnx opset number; resolved via find_opset()
        extra_opset: optional list of extra OperatorSetIdProto entries
        input_names: ordered graph input tensor names (order matters for subgraphs)
        output_names: graph output tensor names
        is_subgraph: True when this graph is a body graph of another node
        graph_name: optional name; auto-generated when None
    """
    if target is None:
        target = []
    self._nodes = []
    self._nodes_by_name = {}
    self._output_to_node_name = {}
    self._output_to_consumers = {}
    self._input_to_graph = {}
    self.shapes = {}
    self.graph_name = graph_name or utils.make_name("tf2onnx")
    self._is_subgraph = is_subgraph
    self.ta_reads = []
    # A list of index, output tuples of potential scan outputs in this graph
    # Used by the tflite while loop handler
    self.scan_outputs = []
    self.func_inputs = []

    self._target = set(target)
    self._dtypes = dtypes

    self._output_shapes = output_shapes
    self._opset = find_opset(opset)

    if extra_opset is not None:
        utils.make_sure(isinstance(extra_opset, list), "invalid extra_opset")
    self._extra_opset = extra_opset

    self.outputs = output_names if output_names is not None else []

    self.parent_graph = None
    self.contained_graphs = {}  # {node_name: {node_attribute_name: Graph}}

    # Wrap the incoming NodeProtos; any declared input that is produced by a
    # non-input node gets its producer's output renamed and a fresh
    # Placeholder inserted in its place.
    ops = [Node(node, self) for node in nodes]
    if input_names is not None:
        input_names_set = set(input_names)
        for n in ops:
            for i, out in enumerate(n.output):
                if out in input_names_set and not n.is_graph_input():
                    n.output[i] = utils.make_name("@@ALLOC")
                    ops.append(Node(helper.make_node("Placeholder", [], outputs=[out], name=out), self))
                    logger.info("Created placeholder for input %s", out)

    input_nodes = {n.output[0]: n for n in ops if n.is_graph_input()}
    if input_names is not None:
        # Preserve caller-specified input order (significant for subgraphs).
        self.inputs = [input_nodes[n] for n in input_names]
    else:
        self.inputs = list(input_nodes.values())

    self.reset_nodes(ops)

    if not is_subgraph:
        # add identity node after each output, in case it is renamed during conversion.
        for o in self.outputs:
            n = self.get_node_by_output_in_current_graph(o)
            if n.is_graph_input():
                # Don't add identity if the node is also an input. We want to keep input names the same.
                continue
            new_output_name = port_name(n.name + "_" + utils.make_name("raw_output_"))
            n_shapes = n.output_shapes
            n_dtypes = n.output_dtypes
            # Detach body graphs before removing so they survive the re-make.
            body_graphs = n.graph.contained_graphs.pop(n.name, None)
            self.remove_node(n.name)

            new_outputs = [output if output != o else new_output_name for output in n.output]
            # domain should be passed to new node
            branches = {}
            if body_graphs:
                for attr_name, body_graph in body_graphs.items():
                    body_graph.parent_graph = self
                    branches[attr_name] = body_graph

            _ = self.make_node(n.type, n.input, outputs=new_outputs, attr=n.attr, name=n.name,
                               skip_conversion=n._skip_conversion, dtypes=n_dtypes, shapes=n_shapes,
                               domain=n.domain, branches=branches)

            self.replace_all_inputs(o, new_output_name, ops=self.get_nodes())
            self.make_node("Identity", [new_output_name], outputs=[o], op_name_scope=n.name + "_" + "graph_outputs")
            self.copy_shape(new_output_name, o)
            self.copy_dtype(new_output_name, o)
| 535 |
+
def create_new_graph_with_same_config(self):
    """Create a clean graph inheriting current graph's configuration."""
    clone = Graph(
        [],
        output_shapes={},
        dtypes={},
        target=self._target,
        opset=self._opset,
        extra_opset=self.extra_opset,
        output_names=[],
    )
    return clone
|
| 540 |
+
@property
|
| 541 |
+
def input_names(self):
|
| 542 |
+
"""Placeholder node outputs"""
|
| 543 |
+
return [node.output[0] for node in self.inputs]
|
| 544 |
+
|
| 545 |
+
@property
def opset(self):
    """Main ONNX opset version this graph targets."""
    return self._opset
|
| 549 |
+
@property
def extra_opset(self):
    """Extra OperatorSetIdProto list (non-default domains), or None."""
    return self._extra_opset
|
| 553 |
+
def is_target(self, *names):
|
| 554 |
+
"""Return True if target platform contains any name."""
|
| 555 |
+
return any(name in self._target for name in names)
|
| 556 |
+
|
| 557 |
+
def make_consts(self, values, np_type=np.int64, skip_conversion=False, raw=True):
    """create list of consts of same type"""
    return [
        self.make_const(utils.make_name("const"),
                        np.array(value).astype(np_type),
                        skip_conversion, raw)
        for value in values
    ]
|
| 565 |
+
def make_const(self, name, np_val, skip_conversion=False, raw=True):
    """Make a new constant in the graph.
    Args:
        name: const node name, must be unique.
        np_val: value of type numpy ndarray.
        skip_conversion: bool, indicate whether this created node would be mapped during conversion.
        raw: whether to store data at field of raw_data or the specific field according to its dtype
    """
    np_val_flat = np_val.flatten()
    # BUG FIX: the `np.object` alias was deprecated in NumPy 1.20 and removed
    # in 1.24 (raises AttributeError); the builtin `object` is equivalent on
    # all NumPy versions.
    is_bytes = np_val.dtype == object and len(np_val_flat) > 0 and isinstance(np_val_flat[0], bytes)
    if raw and not is_bytes:
        onnx_tensor = numpy_helper.from_array(np_val, name)
    else:
        # bytes tensors (and raw=False) must go through the typed fields.
        onnx_tensor = helper.make_tensor(name, utils.map_numpy_to_onnx_dtype(np_val.dtype),
                                         np_val.shape, np_val_flat, raw=False)
    dtype = onnx_tensor.data_type
    node = self.make_node("Const", [], outputs=[name], name=name, attr={"value": onnx_tensor},
                          skip_conversion=skip_conversion, dtypes=[dtype], infer_shape_dtype=False)
    self.set_shape(name, np_val.shape)
    self.set_dtype(name, utils.map_numpy_to_onnx_dtype(np_val.dtype))
    return node
|
| 587 |
+
def copy_const(self, node, name=None):
    """Copy a const node, using name if specified"""
    # TODO: support attr copy starting at opset 12
    new_name = utils.make_name(node.name) if name is None else name
    return self.make_const(new_name, node.get_tensor_value(as_list=False))
|
| 594 |
+
def make_node(self, op_type, inputs, attr=None, output_count=1, outputs=None, skip_conversion=True,
              op_name_scope=None, name=None, shapes=None, dtypes=None, domain=constants.ONNX_DOMAIN,
              infer_shape_dtype=True, branches=None):
    """Make a new onnx node in the graph.

    Args:
        op_type: onnx op type, e.g. "Add".
        inputs: list of input tensor names.
        attr: dict of attributes; AttributeProto values are attached as-is.
        output_count: number of outputs to auto-name (ignored when `outputs` given).
        outputs: explicit output tensor names.
        skip_conversion: whether the node is skipped during conversion
            (forced False for If/Loop/Scan, which carry body graphs).
        op_name_scope: optional prefix for the generated node name.
        name: node name; auto-generated from op_type when None.
        shapes/dtypes: per-output shape/dtype lists; lengths must match output count.
        domain: op domain for the NodeProto.
        infer_shape_dtype: run shape/dtype inference when shapes or dtypes are missing.
        branches: dict attribute-name -> body Graph to attach as subgraphs.

    Returns:
        the created Node.
    """
    if attr is None:
        attr = {}
    if shapes is None:
        shapes = []
    if dtypes is None:
        dtypes = []
    if branches is None:
        branches = {}
    if name is None:
        name = utils.make_name(op_type)

    if op_name_scope:
        name = "_".join([op_name_scope, name])

    logger.debug("Making node: Name=%s, OP=%s", name, op_type)

    if outputs is None:
        outputs = [name + ":" + str(i) for i in range(output_count)]

    output_count = len(outputs)
    # Split attrs: already-built AttributeProtos are attached after node
    # creation; plain values go through helper.make_node.
    raw_attr = {}
    onnx_attrs = []
    for a, v in attr.items():
        if isinstance(v, AttributeProto):
            onnx_attrs.append(v)
        else:
            raw_attr[a] = v

    # Guard against duplicate node names and duplicate output tensor names.
    n = self.get_node_by_name(name)
    utils.make_sure(n is None, "name %s already exists in node: \n%s", name, n)
    for o in outputs:
        n = self.get_node_by_output_in_current_graph(o)
        utils.make_sure(n is None, "output tensor named %s already exists in node: \n%s", o, n)

    onnx_node = helper.make_node(op_type, inputs, outputs, name=name, domain=domain, **raw_attr)

    for name2 in onnx_node.input:
        self._register_input_name(name2, onnx_node)

    if op_type in ["If", "Loop", "Scan"]:
        # we force the op containing inner graphs not skipped during conversion.
        skip_conversion = False

    node = Node(onnx_node, self, skip_conversion=skip_conversion)
    if onnx_attrs:
        _ = [node.set_attr_onnx(a) for a in onnx_attrs]

    for branch, body in branches.items():
        node.set_body_graph_as_attr(branch, body)

    if shapes:
        utils.make_sure(len(shapes) == output_count,
                        "output shape count %s not equal to output count %s", len(shapes), output_count)
        for i in range(output_count):
            self.set_shape(node.output[i], shapes[i])

    if dtypes:
        utils.make_sure(len(dtypes) == output_count,
                        "output dtypes count %s not equal to output count %s", len(dtypes), output_count)
        for i in range(output_count):
            self.set_dtype(node.output[i], dtypes[i])

    # Fill in whatever shape/dtype info the caller did not supply.
    if (not shapes or not dtypes) and infer_shape_dtype:
        self.update_node_shape_dtype(node, override=False)

    logger.debug("Made node: %s\n%s", node.name, node.summary)
    self._nodes.append(node)
    return node
|
| 667 |
+
def append_node(self, node):
    "Add a node to the graph."
    # Capture shapes/dtypes before re-parenting the node onto this graph.
    shapes = node.output_shapes
    dtypes = node.output_dtypes
    node.graph = self
    self._nodes.append(node)
    self._nodes_by_name[node.name] = node
    for out_name, out_dtype, out_shape in zip(node.output, dtypes, shapes):
        self._output_to_node_name[out_name] = node.name
        self.set_dtype(out_name, out_dtype)
        self.set_shape(out_name, out_shape)
    for in_name in node.input:
        self._register_input_name(in_name, node)
|
| 681 |
+
def remove_node(self, node_name):
    """Remove node in current graph.

    Cleans up every index the node participates in: name map, contained
    body graphs, graph inputs, output->node map, shapes/dtypes, and the
    consumer registry for its inputs. The node is detached (graph=None).
    """
    utils.make_sure(node_name in self._nodes_by_name, "node %s not in current graph, cannot remove", node_name)
    node = self.get_node_by_name(node_name)
    del self._nodes_by_name[node_name]
    if node_name in self.contained_graphs:
        del self.contained_graphs[node_name]

    if node in self.inputs:
        self.inputs.remove(node)

    for op_output in node.output:
        del self._output_to_node_name[op_output]

        # drop cached shape/dtype for each of the node's output tensors
        if op_output in self._output_shapes:
            del self._output_shapes[op_output]
        if op_output in self._dtypes:
            del self._dtypes[op_output]

    for op_input in node.input:
        utils.make_sure(
            op_input in self._output_to_consumers,
            "Input %r of node %r not found.", op_input, node_name)
        self._unregister_input_name(op_input, node)

    self._nodes.remove(node)
    node.graph = None
|
| 709 |
+
def reset_nodes(self, ops):
    """Reset the graph with node list.

    Rebuilds all name/output/consumer indexes from `ops`, keeping only the
    dtypes, shapes and contained subgraphs that belong to surviving nodes.

    Raises:
        ValueError: if a registered graph input or output is no longer
            produced by any node in `ops`.
    """
    remained_dtypes = {}
    remained_shapes = {}
    remained_sub_graphs = {}
    for op in ops:
        for op_output in op.output:
            # this check should be removed once we make sure all output tensors have dtype/shape.
            if op_output in self._dtypes:
                remained_dtypes[op_output] = self._dtypes[op_output]
            if op_output in self._output_shapes:
                remained_shapes[op_output] = self._output_shapes[op_output]

        if op.name in self.contained_graphs:
            remained_sub_graphs[op.name] = self.contained_graphs[op.name]

    self._nodes = ops
    self.contained_graphs = remained_sub_graphs
    self._nodes_by_name = {op.name: op for op in ops}
    self._output_to_node_name = {}
    self._output_to_consumers = {}
    for op in ops:
        for op_output in op.output:
            self._output_to_node_name[op_output] = op.name
        for op_input in op.input:
            self._register_input_name(op_input, op)

    for n in self.inputs:
        if n not in ops:
            # BUG FIX: `n` is a Node object; concatenating it to a str raised
            # TypeError instead of the intended ValueError. Use n.name.
            raise ValueError("graph input " + n.name + " not exist")
    for o in self.outputs:
        if o not in self._output_to_node_name:
            raise ValueError("graph output " + o + " not exist")

    self._dtypes = remained_dtypes
    self._output_shapes = remained_shapes
|
| 747 |
+
def is_empty_input(self, name):
    """Return True when `name` is ONNX's marker for a missing optional input."""
    # in ONNX, operation may have optional input and an empty string may be used
    # in the place of an actual argument's name to indicate a missing argument
    return name == utils.ONNX_EMPTY_INPUT
|
| 752 |
+
def check_integrity(self):
    """
    Check graph integrity. Every node's input needs to associate with a node.
    Return broken outputs.
    """
    missing = {
        inp
        for node in self.get_nodes()
        for inp in node.input
        if self.get_node_by_output(inp) is None and not self.is_empty_input(inp)
    }
    return list(missing)
|
| 764 |
+
def update_node_shape_dtype(self, node, override=False):
    """Try the best to infer shapes and dtypes for outputs of the node,
    by default, we respect TF shapes and dtypes.

    Args:
        node: the Node whose outputs to infer.
        override: when True, inferred values replace existing ones (with a
            warning); when False, existing values win.
    """
    # Const and graph-input nodes already carry their shape/dtype.
    if node.is_const() or node.is_graph_input():
        return
    # NOTE: only support onnx node for now
    if not utils.is_onnx_domain(node.domain):
        return

    logger.debug("Infer shape and dtype for [%s]", node.name)
    # NOTE: shape inference for some ops need the input values of the op, e.g., Reshape
    # op needs the "Shape" value to infer output shape.
    initializers = []
    for i, inp in enumerate(node.inputs):
        if inp is None:
            # missing producer is fine only for empty (optional) inputs
            if not self.is_empty_input(node.input[i]):
                if logger.isEnabledFor(logging.INFO):
                    logger.warning(
                        "[%s] infer a inexistent node: [%s], please check the code",
                        node.name, node.input[i]
                    )
            continue
        if inp.is_const():
            # feed const input values to the inference as initializers
            t = inp.get_attr("value")
            tensor = helper.get_attribute_value(t)
            tensor.name = inp.output[0]
            initializers.append(tensor)

    input_shapes = [self.get_shape(i) for i in node.input]
    input_dtypes = [self.get_dtype(i) for i in node.input]

    shapes, dtypes = infer_onnx_shape_dtype(node, self._opset, input_shapes, input_dtypes, initializers)
    if not shapes or not dtypes:
        return

    for output, shape, dtype in zip(node.output, shapes, dtypes):
        if dtype == TensorProto.UNDEFINED:
            logger.debug("Inferred dtype for [%s, type: %s] is UNDEFINED, SKIP", node.name, node.type)
        else:
            existing_dtype = self.get_dtype(output)
            if existing_dtype is not None and existing_dtype != dtype:
                if override:
                    logger.warning("Override dtype of %s from %s to %s", output, existing_dtype, dtype)
                else:
                    dtype = existing_dtype
            self.set_dtype(output, dtype)
            logger.debug("Set dtype of [%s] to %s", output, dtype)

        if shape is None:
            logger.debug("Inferred shape for [%s, type: %s] is None, SKIP", node.name, node.type)
        else:
            existing_shape = self.get_shape(output)
            if existing_shape is not None and not utils.are_shapes_equal(existing_shape, shape):
                if override:
                    logger.warning("Override shape of %s from %s to %s", output, existing_shape, shape)
                else:
                    shape = existing_shape
            self.set_shape(output, shape)
            logger.debug("Set shape of [%s] to %s", output, shape)
|
| 825 |
+
def update_proto(self, external_tensor_storage=None):
    """Update the onnx protobuf from out internal Node structure."""
    # Each node syncs its own wrapped NodeProto.
    for graph_node in self._nodes:
        graph_node.update_proto(external_tensor_storage)
|
| 830 |
+
def get_nodes(self):
    """Get node list (the live internal list, not a copy)."""
    return self._nodes
|
| 834 |
+
def get_node_by_output(self, output, search_in_parent_graphs=True):
    """Get node by node output id recursively going through nested graphs.
    Args:
        search_in_parent_graphs: search in all parent graphs
    """
    graph = self
    while graph is not None:
        found = graph.get_node_by_output_in_current_graph(output)
        if found:
            return found
        if not search_in_parent_graphs:
            break
        graph = graph.parent_graph
    return None
|
| 851 |
+
def get_node_by_output_in_current_graph(self, output):
|
| 852 |
+
"""Get node by node output id."""
|
| 853 |
+
name = self._output_to_node_name.get(output)
|
| 854 |
+
ret = None
|
| 855 |
+
if name:
|
| 856 |
+
ret = self._nodes_by_name.get(name)
|
| 857 |
+
return ret
|
| 858 |
+
|
| 859 |
+
def get_node_by_name(self, name):
|
| 860 |
+
"""Get node by name."""
|
| 861 |
+
ret = self._nodes_by_name.get(name)
|
| 862 |
+
return ret
|
| 863 |
+
|
| 864 |
+
def set_node_by_name(self, node):
    """Set node by name."""
    self._nodes_by_name[node.name] = node
    # register the node as producer of each of its outputs...
    for out in node.output:
        self._output_to_node_name[out] = node.name
    # ...and as consumer of each of its inputs.
    for inp in node.input:
        self._register_input_name(inp, node)
|
| 872 |
+
def change_node_name(self, node, new_name):
    """Rename a node by removing it and re-creating it under `new_name`.

    Output tensor names are regenerated from the new node name; graph
    outputs and all consumers referencing the old outputs are rewired.

    Returns:
        the newly created Node.
    """
    utils.make_sure(new_name not in self._nodes_by_name, "node %s not unique ", new_name)
    # capture shape/dtype before removal so the new node inherits them
    dtypes = node.output_dtypes
    shapes = node.output_shapes
    self.remove_node(node.name)
    new_node = self.make_node(node.type, node.input, output_count=len(node.output),
                              attr=node.attr, dtypes=dtypes, shapes=shapes, name=new_name)
    for i, old_output in enumerate(node.output):
        new_output = port_name(new_name, i)
        # fix up the graph output list if it referenced the old tensor
        for j, k in enumerate(self.outputs):
            if k == old_output:
                self.outputs[j] = new_output
                break
        self.replace_all_inputs(old_output, new_output, ops=self.get_nodes())
    return new_node
|
| 889 |
+
def add_graph_input(self, name, dtype=None, shape=None):
    """Add placeholder node as graph's input. Order matters only for subgraph.
    Placeholders in original graph are assumed for main graph, order not matters.
    """
    dtype = self.get_dtype(name) if dtype is None else dtype
    shape = self.get_shape(name) if shape is None else shape

    placeholder = self.make_node("Placeholder", [], outputs=[name], dtypes=[dtype], shapes=[shape])
    self.inputs.append(placeholder)
|
| 902 |
+
def add_graph_input_with_default(self, name, default_const, dtype=None, shape=None):
    """Add a PlaceholderWithDefault node as a graph input.

    Args:
        name: tensor name of the new graph input.
        default_const: a Const node supplying the default value; its output
            is renamed to "<name>_default".
        dtype: optional dtype override; looked up from the graph when None.
        shape: optional shape override; looked up from the graph when None.
    """
    if dtype is None:
        dtype = self.get_dtype(name)

    if shape is None:
        shape = self.get_shape(name)

    # Use utils.make_name for consistency with the rest of this module;
    # the bare `make_name` spelling appeared nowhere else here.
    default_const_name = port_name(utils.make_name("{}_default".format(name)))
    default_const.output = [default_const_name]
    new_node = self.make_node("PlaceholderWithDefault", [default_const_name], outputs=[name],
                              dtypes=[dtype], shapes=[shape])
    self.inputs.append(new_node)
|
| 916 |
+
def add_graph_output(self, name, dtype=None, shape=None):
    """Add node output as graph's output."""
    utils.make_sure(name in self._output_to_node_name, "output %s not exist in the graph", name)

    dtype = self.get_dtype(name) if dtype is None else dtype
    shape = self.get_shape(name) if shape is None else shape

    if name in self.outputs:
        raise ValueError("graph output " + name + " already exists")
    utils.make_sure(shape is not None, "shape for output %s should not be None", name)
    utils.make_sure(dtype is not None, "dtype for output %s should not be None", name)
    self.outputs.append(name)
    self.set_shape(name, shape)
    self.set_dtype(name, dtype)
|
| 935 |
+
def get_dtype(self, name):
    """Get dtype for node."""
    owner = self.get_node_by_output(name, search_in_parent_graphs=True)
    if not owner:
        return None
    # the dtype lives on the graph that actually owns the tensor
    return owner.graph._dtypes.get(name)
|
| 940 |
+
def set_dtype(self, name, dtype):
    """Set dtype for node."""
    owner = self.get_node_by_output(name, search_in_parent_graphs=True)
    # store on the graph that owns the tensor (may be a parent graph)
    owner.graph._dtypes[name] = dtype
|
| 945 |
+
def copy_dtype(self, src_name, dst_name):
    """Copy dtype from another node."""
    self.set_dtype(dst_name, self.get_dtype(src_name))
|
| 950 |
+
def get_shape(self, name):
    """Get shape for node.

    NOTE: normalizes the STORED shape in place — every None dim becomes -1,
    and a -1 batch dim (index 0) becomes utils.ONNX_UNKNOWN_DIMENSION.
    Returns None when the tensor has no recorded shape.
    """
    utils.make_sure(isinstance(name, six.text_type), "get_shape name is invalid type: %s", name)
    node = self.get_node_by_output(name, search_in_parent_graphs=True)
    shape = node.graph._output_shapes.get(name) if node else None
    if shape:
        for i, v in enumerate(shape):
            if v is None:
                # pylint: disable=unsupported-assignment-operation
                shape[i] = -1
        # hack to allow utils.ONNX_UNKNOWN_DIMENSION to override batchsize if needed.
        # default is -1.
        if shape[0] == -1:
            # pylint: disable=unsupported-assignment-operation
            shape[0] = utils.ONNX_UNKNOWN_DIMENSION
        return shape
    return shape
|
| 968 |
+
def get_rank(self, name):
    """Returns len(get_shape(name)) or None if shape is None"""
    shape = self.get_shape(name)
    return None if shape is None else len(shape)
|
| 975 |
+
def set_shape(self, name, val):
    """Set new shape of node."""
    # normalize to a plain python list before storing
    if isinstance(val, np.ndarray):
        val = val.tolist()
    elif isinstance(val, tuple):
        val = list(val)
    owner = self.get_node_by_output(name, search_in_parent_graphs=True)
    utils.make_sure(owner is not None, "cannot find node by output id %s", name)
    owner.graph._output_shapes[name] = val
|
| 985 |
+
def copy_shape(self, input_name, output_name):
    """Copy shape from another node."""
    shape = self.get_shape(input_name)
    # silently skip when the source has no recorded shape
    if shape is None:
        return
    self.set_shape(output_name, shape)
|
| 992 |
+
def topological_sort(self, ops):
    """Topological sort of graph.

    Builds a producer->consumer adjacency over `ops` (including implicit
    inputs from body graphs), runs an iterative DFS that assigns each node
    a finishing label, then resets the graph with the nodes in label order.

    Raises:
        ValueError: if the graph contains a cycle.
    """
    # sort by name, the result will be reversed alphabeta
    ops.sort(key=lambda op: op.name)

    def _push_stack(stack, node, in_stack):
        stack.append(node)
        if node in in_stack:
            raise ValueError('Graph has cycles, node=' + ops[node].name)
        in_stack[node] = True

    def _get_unvisited_child(g, node, not_visited):
        for child in g[node]:
            if child in not_visited:
                return child
        return -1

    n = len(ops)
    g = [[] for _ in range(n)]
    op_name_to_index = {}
    for i, op in enumerate(ops):
        op_name_to_index[op.name] = i

    # edge producer -> consumer for every (explicit or implicit) input
    for i, op in enumerate(ops):
        all_input = set(op.input)
        implicit_inputs = op.get_implicit_inputs()
        all_input |= set(implicit_inputs)
        # remove those empty inputs
        all_input = list(filter(lambda a: a != '', all_input))
        for inp in sorted(all_input):
            j = self.get_node_by_output(inp)
            utils.make_sure(j is not None, "Cannot find node with output %r in graph %r", inp, self.graph_name)
            if self.parent_graph and j.name not in op_name_to_index:
                # there might be some outer-scoped inputs for an inner Graph.
                pass
            else:
                g[op_name_to_index[j.name]].append(i)

    # label for each op. highest = sink nodes.
    label = [-1 for _ in range(n)]
    stack = []
    in_stack = dict()
    not_visited = dict.fromkeys(range(n))
    label_counter = n - 1

    # iterative DFS: a node gets its label when all children are done,
    # so labels are a reverse finishing order (a valid topological order).
    while not_visited:
        node = list(not_visited.keys())[0]
        _push_stack(stack, node, in_stack)
        while stack:
            node = _get_unvisited_child(g, stack[-1], not_visited)
            if node != -1:
                _push_stack(stack, node, in_stack)
            else:
                node = stack.pop()
                in_stack.pop(node)
                not_visited.pop(node)
                label[node] = label_counter
                label_counter -= 1

    # labels are unique, so tuple comparison never falls through to Node
    ret = [x for _, x in sorted(zip(label, ops))]
    self.reset_nodes(ret)
|
| 1054 |
+
def make_graph(self, doc, graph_name=None, external_tensor_storage=None):
    """
    Create GraphProto for onnx from internal graph.
    Args:
        doc: text for doc string of the graph
        graph_name: optional name override; defaults to self.graph_name
        external_tensor_storage: if set, large tensors are stored externally
    """
    graph_name = graph_name or self.graph_name
    # prune, order, and sync protos before serialization
    self.delete_unused_nodes(self.outputs)
    self.topological_sort(self.get_nodes())
    self.update_proto(external_tensor_storage)

    # TODO: we'd want to do something like this so that transpose optimizer is active
    # for all (unit) tests
    # if optimize:
    #     from tf2onnx.optimizer.transpose_optimizer import TransposeOptimizer
    #     optimizer = TransposeOptimizer(self, False)
    #     optimizer.optimize()
    # split nodes into consts / graph inputs / regular ops
    ops = []
    const_ops = []
    graph_inputs = self.inputs.copy()
    for op in self.get_nodes():
        if op.is_const():
            const_ops.append(op)
        elif op.is_graph_input():
            if op not in graph_inputs:
                graph_inputs.append(op)
        else:
            ops.append(op)

    # create initializers for placeholder with default nodes
    initializers = []
    placeholder_default_const_ops = []
    for op in graph_inputs:
        if op.type == "PlaceholderWithDefault":
            utils.make_sure(op.inputs[0] is not None, "Cannot find node with output {}".format(op.input[0]))
            utils.make_sure(op.inputs[0].is_const(),
                            "non-const default value for PlaceholderWithDefault node '%s' is not supported. "
                            "Use the --use_default or --ignore_default flags to convert this node.", op.name)
            # copy the tensor value, set its name to current node's output, add as initializer
            value = op.inputs[0].get_tensor_value(as_list=False)
            tensor = numpy_helper.from_array(value, op.output[0])
            initializers.append(tensor)
            placeholder_default_const_ops.append(op.inputs[0])

    # create initializers for constant nodes
    const_ops = [op for op in const_ops if op not in placeholder_default_const_ops]
    for op in const_ops:
        # not to use numpy_helper.from_array to create a new tensor
        # because sometimes onnx will have a bug that only check the tensor data in specific field
        # such as at upsample it only checks the float_data field.
        t = op.get_value_attr(external_tensor_storage)
        tensor = helper.get_attribute_value(t)
        tensor.name = op.output[0]
        initializers.append(tensor)

    # create input_tensor_values
    input_ids = [op.output[0] for op in graph_inputs]
    # onnx with IR version below 4 requires initializer should be in inputs.
    # here we check opset version rather than IR version for the reason:
    # https://github.com/onnx/tensorflow-onnx/pull/557
    # opset 9 come with IR 4.
    if self.opset < 9:
        input_ids += [op.output[0] for op in const_ops]

    input_tensor_values = self.make_onnx_graph_io(input_ids)

    # create output_tensor_values
    output_tensor_values = self.make_onnx_graph_io(self.outputs)

    # create graph proto
    graph = helper.make_graph([op.op for op in ops],
                              graph_name,
                              input_tensor_values,
                              output_tensor_values,
                              initializer=initializers,
                              doc_string=doc)

    return graph
|
| 1134 |
+
def make_model(self, graph_doc, optimize=False, graph_name="tf2onnx", external_tensor_storage=None, **kwargs):
    """
    Create final ModelProto for onnx from internal graph.
    Args:
        graph_doc: text for doc string of the graph
        optimize: optimize model proto via onnx optimizer
        graph_name: name stored on the GraphProto
        external_tensor_storage: passed through to make_graph for large tensors
        kwargs: extra ModelProto properties (producer_name, opset_imports, ...)
    Returns:
        onnx ModelProto
    """
    graph = self.make_graph(graph_doc, graph_name, external_tensor_storage)

    # Fill in defaults without discarding caller-supplied kwargs.
    # The previous code rebound the whole kwargs dict when "producer_name"
    # was missing, silently dropping properties such as ir_version, domain,
    # model_version and doc_string passed by callers like
    # GraphUtil.optimize_model_proto.
    if "producer_name" not in kwargs:
        kwargs["producer_name"] = "tf2onnx"
        kwargs["producer_version"] = __version__

    if "opset_imports" not in kwargs:
        opsets = []
        imp = OperatorSetIdProto()
        imp.version = self._opset
        opsets.append(imp)
        if self.extra_opset is not None:
            opsets.extend(self.extra_opset)
        kwargs["opset_imports"] = opsets
    model_proto = helper.make_model(graph, **kwargs)

    utils.make_sure(self.opset in constants.OPSET_TO_IR_VERSION,
                    "Opset %s is not supported yet. Please use a lower opset" % self.opset)

    # set the IR version based on opset; narrow the bare except so real
    # programming errors are not swallowed
    try:
        model_proto.ir_version = constants.OPSET_TO_IR_VERSION.get(self.opset, model_proto.ir_version)
    except Exception:  # pylint: disable=broad-except
        logger.error("ir_version override failed - install the latest onnx version")

    # optimize the model proto.
    # TODO: this is disabled by default because of bugs in fuse_consecutive_transposes
    if optimize:
        model_proto = optimizer.optimize(model_proto)
    return model_proto
|
| 1171 |
+
|
| 1172 |
+
def make_onnx_graph_io(self, ids):
    """Create tensor_value_info for passed input/output ids."""
    tensor_value_infos = []
    for tensor_name in ids:
        tensor_dtype = self.get_dtype(tensor_name)
        tensor_shape = self.get_shape(tensor_name)

        utils.make_sure(tensor_dtype is not None, "missing output dtype for " + tensor_name)
        # TODO: allow None output shape or not? e.g. shape=(?,)
        # a missing shape is tolerated but reported
        if tensor_shape is None:
            logger.warning("missing output shape for %s", tensor_name)

        info = utils.make_onnx_inputs_outputs(tensor_name, tensor_dtype, tensor_shape)
        tensor_value_infos.append(info)
    return tensor_value_infos
|
| 1187 |
+
|
| 1188 |
+
def dump_graph(self):
    """Dump graph with shapes (helpful for debugging)."""
    for op in self.get_nodes():
        # render each input together with its shape, e.g. "x:0[1, 3]"
        rendered_inputs = ", ".join(
            "{}{}".format(inp, self.get_shape(inp)) for inp in op.input)
        logger.debug("%s %s %s %s",
                     op.type,
                     self.get_shape(op.output[0]),
                     op.name,
                     rendered_inputs)
|
| 1197 |
+
|
| 1198 |
+
def follow_inputs(self, node, num, space=""):
    """Follow inputs for (helpful for debugging).

    Walks up to *num* levels of producers of *node*; the top-level call
    prints the collected trace (deepest producer first) and returns [].
    """
    if num == 0:
        return []
    is_top_call = (space == "")
    trace = ["{}{} {} {}".format(space, node.type, node.name,
                                 self.get_shape(port_name(node.name)))]
    for producer in node.inputs:
        trace.extend(self.follow_inputs(producer, num - 1, space + " "))
    if is_top_call:
        print("\n".join(reversed(trace)))
        print()
        return []
    return trace
|
| 1213 |
+
|
| 1214 |
+
def dump_node_statistics(self):
    """Return a Counter of op types in this graph, including body graphs."""
    op_counts = collections.Counter()
    for node in self.get_nodes():
        op_counts[node.type] += 1
        sub_graphs = node.get_body_graphs()
        if sub_graphs:
            # recursively fold in subgraph statistics (e.g. Loop/If bodies)
            for sub_graph in sub_graphs.values():
                op_counts += sub_graph.dump_node_statistics()

    return op_counts
|
| 1224 |
+
|
| 1225 |
+
def remove_input(self, node, to_be_removed, input_index=None):
    """Remove input from Node.
    Args:
        node: the node we expect the input on
        to_be_removed: the node name we want to remove
        input_index: if not None, index of the input to be removed,
            the method is more efficient if *input_index* is specified,
            otherwise, it has to look for every input named *to_be_removed*.
    """
    assert isinstance(node, Node) and isinstance(to_be_removed, six.text_type)
    if input_index is not None:
        # fast path: caller told us exactly which slot to drop
        assert node.input[input_index] == to_be_removed
        consumers = self._output_to_consumers.get(node.input[input_index])
        if consumers is not None and node.name in consumers:
            consumers.remove(node.name)
        del node.input[input_index]
        return

    for idx, input_name in enumerate(node.input):
        if input_name != to_be_removed:
            continue
        utils.make_sure(
            node.input.count(input_name) <= 1,
            "Node %r takes multiple times the same input %r. This case is not handled.",
            node.name, input_name)
        self._unregister_input_name(input_name, node)
        del node.input[idx]
        break
|
| 1253 |
+
|
| 1254 |
+
# don't remove output from parent since others might depend on it
|
| 1255 |
+
|
| 1256 |
+
def insert_new_node_on_input(self, node, op_type, input_name, name=None, domain=None, **kwargs):
    """Create and insert a new node into the graph.
    Args:
        node: we want to replace the input for this node
        op_type: type for new operation
        input_name: the name(s) of the outputs above us
            if scalar, new node placed above input_name
            if list, new node placed above input_name[0]. list is inputs into new node
        name: the name of the new op
        kwargs: attributes of the new node

    Returns:
        node that was inserted
    """
    if name is None:
        name = utils.make_name(node.name)
    new_output = port_name(name)
    input_names = input_name if isinstance(input_name, list) else [input_name]

    new_node = self.make_node(op_type, input_names, attr=kwargs,
                              outputs=[new_output], name=name, domain=domain)
    # rewire only the first occurrence of input_names[0] on the target node
    for idx, current in enumerate(node.input):
        if current == input_names[0]:
            self.replace_input(node, node.input[idx], new_output, idx)
            break
    return new_node
|
| 1282 |
+
|
| 1283 |
+
def insert_node_on_output(self, node, output_name=None):
    """
    The inserted node takes the *output_name* as input and produces a
    new output. The function goes through every node taking *output_name*
    and replaces it by the new output name.

    Args:
        node: the already-created node to splice in
        output_name: output to intercept; defaults to node.input[0]
    Returns:
        the same node
    """
    if output_name is None:
        output_name = node.input[0]
    new_output = node.output[0]

    # `.get` avoids a KeyError when nothing consumes *output_name* yet
    # (e.g. it is only a graph output); there is simply nothing to rewire.
    consumer_names = self._output_to_consumers.get(output_name, ())
    to_replace = [self.get_node_by_name(n) for n in consumer_names]
    to_replace = [n for n in to_replace if n != node]
    self.replace_all_inputs(output_name, new_output, ops=to_replace)
    return node
|
| 1297 |
+
|
| 1298 |
+
def insert_new_node_on_output(self, op_type, output_name=None, name=None, inputs=None, domain=None, **kwargs):
    """Create and insert a new node into the graph.
    It then calls insert_node_on_output.

    Args:
        op_type: type for new operation
        output_name: the names of the outputs above us
        name: the name of the new op
        kwargs: attributes of the new node

    Returns:
        node that was inserted
    """
    # Check None first: previously this check ran after the isinstance
    # check, which already fails on None with the misleading
    # "output_name's type is not expected" message, making it dead code.
    utils.make_sure(output_name is not None, "output_name cannot be None for op_type=%r.", op_type)
    utils.make_sure(isinstance(output_name, six.text_type), "output_name's type is not expected: %s",
                    type(output_name))
    utils.make_sure(isinstance(op_type, six.text_type), "op_type's type is not expected: %s",
                    type(op_type))

    if inputs is None:
        inputs = [output_name]
    if name is None:
        name = utils.make_name(op_type)

    new_output = port_name(name)
    new_node = self.make_node(op_type, inputs, attr=kwargs, outputs=[new_output], name=name, domain=domain)
    return self.insert_node_on_output(new_node, output_name)
|
| 1325 |
+
|
| 1326 |
+
def find_output_consumers(self, output_name):
    """Find all nodes consuming a given output."""
    consumers = []
    # registered consumer names for this output; unknown outputs yield none
    for consumer_name in self._output_to_consumers.get(output_name, ()):
        consumer = self.get_node_by_name(consumer_name)
        if consumer is None:
            continue
        if output_name in consumer.input:
            consumers.append(consumer)

    # find consumers in sub graphs
    if output_name in self._input_to_graph:
        for sub_graph in self._input_to_graph[output_name].values():
            consumers.extend(sub_graph.find_output_consumers(output_name))
    return consumers
|
| 1345 |
+
|
| 1346 |
+
def _register_input_name(self, input_name, node, only_graph=False):
    """Register node taking a specific input.

    With only_graph=True only the graph-level bookkeeping is propagated
    to parents, not the consumer set.
    """
    if not only_graph:
        self._output_to_consumers.setdefault(input_name, set()).add(node.name)
    if self.parent_graph is not None:
        # record that this subgraph references input_name, then bubble up
        self.parent_graph._input_to_graph.setdefault(input_name, {})[id(self)] = self
        self.parent_graph._register_input_name(input_name, node, only_graph=True)
|
| 1357 |
+
|
| 1358 |
+
def _unregister_input_name(self, input_name, node, only_graph=False):
    """Unregister node taking a specific input."""
    node_name = node.name
    if not only_graph:
        # Fixed: the membership test must look up *input_name* in the
        # mapping itself. The old code tested
        # `input_name in self._output_to_consumers[input_name]`, which
        # raises KeyError for unregistered inputs and compares an output
        # name against a set of node names, so the removal almost never ran.
        if input_name in self._output_to_consumers:
            if node_name in self._output_to_consumers[input_name]:
                self._output_to_consumers[input_name].remove(node_name)
    if (self.parent_graph is not None and
            input_name in self.parent_graph._input_to_graph and
            id(self) in self.parent_graph._input_to_graph[input_name]):
        del self.parent_graph._input_to_graph[input_name][id(self)]
        self.parent_graph._unregister_input_name(input_name, node, only_graph=True)
|
| 1370 |
+
|
| 1371 |
+
def replace_all_inputs(self, old_input, new_input, ops=None):
    """
    Replace all inputs pointing to old_input with new_input.
    *ops* is used if defined, otherwise `_output_to_consumers`
    is used to determine the impacted nodes.
    """
    if old_input == new_input:
        return
    # make sure the new output has a consumer set even if nothing is rewired
    if new_input not in self._output_to_consumers:
        self._output_to_consumers[new_input] = set()

    if ops is not None:
        # caller-provided node list: reuse it for subgraphs below
        keep_ops = True
    elif old_input in self._output_to_consumers:
        # resolve registered consumer names to live nodes, dropping stale ones
        ops = list(
            filter(lambda a: a is not None,
                   map(self.get_node_by_name, self._output_to_consumers[old_input])))
        keep_ops = False
    else:
        ops = []
        keep_ops = False

    for node in ops:
        assert node is not None
        # rewiring a node's input to its own output would create a cycle
        if old_input in node.input and new_input in node.output:
            raise RuntimeError("creating a circle in the graph is not allowed: " + node.name)
        self._register_input_name(new_input, node)

        for i, input_name in enumerate(node.input):
            if input_name == old_input:
                self.replace_input(node, node.input[i], new_input, i)

    # modify references in sub graphs
    if old_input in self._input_to_graph:
        for g in self._input_to_graph[old_input].values():
            g.replace_all_inputs(old_input, new_input,
                                 ops=g.get_nodes() if keep_ops else None)
|
| 1408 |
+
|
| 1409 |
+
def replace_input(self, node, old_input, new_input, input_index=None):
    """
    Replace one input in a node.
    The method is more efficient if *input_index* is specified.
    Otherwise, it renames every output named *old_input*.
    """
    assert isinstance(node, Node) and isinstance(old_input, six.text_type) and isinstance(new_input, six.text_type)
    is_replaced = False
    if input_index is None:
        # scan all slots; several of them may carry the same name
        for idx, current in enumerate(node.input):
            if current == old_input:
                node.input[idx] = new_input
                is_replaced = True
    elif node.input[input_index] == old_input:
        node.input[input_index] = new_input
        is_replaced = True
    else:
        raise RuntimeError("Unable to replace input %r into %r for node %r." % (old_input, new_input, node.name))

    consumers = self._output_to_consumers.get(old_input, None)
    if consumers is not None and node.name in consumers:
        # A node may take twice the same entry.
        consumers.remove(node.name)

    self._register_input_name(new_input, node)
    return is_replaced
|
| 1436 |
+
|
| 1437 |
+
def replace_inputs(self, node, new_inputs):
    """Replace node inputs.

    Unregisters the node from its current inputs' consumer sets,
    registers it for the new inputs, then swaps the input list.
    """
    assert isinstance(node, Node) and isinstance(new_inputs, list)

    for old_input in node.input:
        to_ops = self._output_to_consumers.get(old_input, None)
        # Fixed: consumer sets store *node names*; the old code discarded
        # the output name itself (`to_ops.remove(old_input)`), so the node
        # stayed registered as a consumer of inputs it no longer takes.
        if to_ops is not None and node.name in to_ops:
            to_ops.remove(node.name)

    for input_name in new_inputs:
        assert isinstance(input_name, six.text_type)
        self._register_input_name(input_name, node)

    node.input = new_inputs
    return True
|
| 1454 |
+
|
| 1455 |
+
def _extract_sub_graph_nodes(self, dest_node, input_checker=None):
    """Return nodes of subgraph ending with dest_node.
    Args:
        dest_node: output node of the subgraph to find
        input_checker: customized input check function: bool func(node)

    Return:
        a set of nodes
    """
    found = set()
    if not dest_node or (input_checker and input_checker(dest_node) is False):
        return found

    # breadth-insensitive worklist traversal from dest_node to its producers
    pending = set([dest_node])
    while pending:
        current = pending.pop()
        found.add(current)
        all_inputs = list(current.input) + list(current.get_implicit_inputs())
        for input_id in all_inputs:
            # we don't care about nested graph here, just handle current graph cropping.
            producer = self.get_node_by_output(input_id, search_in_parent_graphs=False)
            if not producer:
                # some nodes (for example Scan) have optional inputs, which
                # might have empty input.
                # subgraph might have input defined in outer graph
                continue
            if producer in found:
                continue
            if input_checker and input_checker(producer) is False:
                continue
            pending.add(producer)
    return found
|
| 1486 |
+
|
| 1487 |
+
def extract_sub_graph_nodes(self, outputs_name, input_checker=None, remove_unused_inputs=True):
    """Return nodes of subgraph having outputs_name as outputs.
    Args:
        outputs_name: output ids of the subgraph to find
        input_checker: customized input check function: bool func(node)
        remove_unused_inputs: bool, indicates whether unused placeholder inputs will be removed
            in the resulting nodes.
    Return:
        a list of nodes
    """
    if not outputs_name:
        return []

    collected = set()
    for output in outputs_name:
        producer = self.get_node_by_output(output, search_in_parent_graphs=False)
        collected |= self._extract_sub_graph_nodes(producer, input_checker)

    if not remove_unused_inputs:
        # add back placeholder nodes if they are not connected to outputs.
        collected |= set(self.inputs)

    return list(collected)
|
| 1510 |
+
|
| 1511 |
+
def delete_unused_nodes(self, outputs_name):
    """Delete nodes not in subgraph ending with outputs_name."""
    if not outputs_name:
        logger.debug("Outputs not specified, delete_unused_nodes not taking effect.")
        return

    # we need keep those placeholders that are used as input of Loop's body graph.
    # some of them are not used in the graph, but still need be there to keep the graph complete.
    kept_nodes = self.extract_sub_graph_nodes(outputs_name, remove_unused_inputs=False)
    for kept in kept_nodes:
        body_graphs = kept.get_body_graphs()
        if not body_graphs:
            continue
        # prune subgraphs recursively against their own outputs
        for body_graph in body_graphs.values():
            body_graph.delete_unused_nodes(body_graph.outputs)
    self.reset_nodes(kept_nodes)
|
| 1526 |
+
|
| 1527 |
+
def safe_to_remove_nodes(self, to_delete):
    """List of nodes that are safe to delete (i.e. outputs not consumed by other nodes)."""
    delete_set = set(to_delete)
    removable = []
    for candidate in delete_set:
        consumers = set()
        for out in candidate.output:
            consumers.update(self.find_output_consumers(out))
        # safe only when every consumer is itself scheduled for deletion
        if consumers.issubset(delete_set):
            removable.append(candidate)
    return removable
|
| 1538 |
+
|
| 1539 |
+
# TODO(tomwildenhain): Remove this function
|
| 1540 |
+
def safe_remove_nodes(self, to_delete):
    """Delete nodes in `to_delete` without third-party node consuming it."""
    delete_set = set(to_delete)
    for candidate in delete_set:
        consumers = set()
        for out in candidate.output:
            consumers.update(self.find_output_consumers(out))
        # only remove when no consumer survives outside the deletion set
        if consumers.issubset(delete_set):
            self.remove_node(candidate.name)
|
| 1549 |
+
|
| 1550 |
+
def is_safe_to_remove_nodes(self, to_delete, outputs_to_ignore=None):
    """Returns true if the outputs of all the nodes in to_delete have no third-party nodes consuming them."""
    delete_set = set(to_delete)
    ignored_outputs = set(outputs_to_ignore or [])
    for candidate in delete_set:
        consumers = set()
        for out in candidate.output:
            if out in ignored_outputs:
                continue
            consumers.update(self.find_output_consumers(out))
        # any consumer outside the deletion set makes removal unsafe
        if not consumers.issubset(delete_set):
            return False
    return True
|
| 1563 |
+
|
| 1564 |
+
|
| 1565 |
+
class GraphUtil(object):
    """Utilities for Graph manipulation."""

    @staticmethod
    def optimize_graph(graph):
        # thin wrapper so callers need not import tf2onnx.optimizer directly
        return optimizer.optimize_graph(graph)

    @staticmethod
    def optimize_model_proto(onnx_model_proto):
        """Optimize the model proto, for example: eliminating all useless Transpose pairs.

        Returns:
            model proto after optimization, if optimizer run successfully
            or onnx_model_proto, if exceptions happens
        """
        try:
            # preserve the original model's properties (ir_version, producer, ...)
            kwargs = GraphUtil.get_onnx_model_properties(onnx_model_proto)
            graph = GraphUtil.create_graph_from_onnx_model(onnx_model_proto)
            graph = GraphUtil.optimize_graph(graph)
            model_proto = graph.make_model(onnx_model_proto.graph.doc_string,
                                           graph_name=onnx_model_proto.graph.name, **kwargs)

            # carry over user metadata, which make_model does not handle
            if onnx_model_proto.metadata_props:
                metadata_props = {p.key: p.value for p in onnx_model_proto.metadata_props}
                helper.set_model_props(model_proto, metadata_props)
            return model_proto
        except Exception:
            # sometimes, onnx shape inference will fail for some reason,
            # return onnx_model_proto for this case
            logger.warning("Failed to optimize model proto", exc_info=1)
            return onnx_model_proto

    @staticmethod
    def get_onnx_model_properties(onnx_model_proto):
        """Get ModelProto properties as kwargs usable by make_model."""
        kwargs = {}
        # copy only fields that are explicitly set on the proto
        if onnx_model_proto.HasField('ir_version'):
            kwargs["ir_version"] = onnx_model_proto.ir_version
        if onnx_model_proto.HasField('producer_name'):
            kwargs["producer_name"] = onnx_model_proto.producer_name
        if onnx_model_proto.HasField('producer_version'):
            kwargs["producer_version"] = onnx_model_proto.producer_version
        if onnx_model_proto.HasField('domain'):
            kwargs["domain"] = onnx_model_proto.domain
        if onnx_model_proto.HasField('model_version'):
            kwargs["model_version"] = onnx_model_proto.model_version
        if onnx_model_proto.HasField('doc_string'):
            kwargs["doc_string"] = onnx_model_proto.doc_string
        kwargs["opset_imports"] = onnx_model_proto.opset_import

        return kwargs

    @staticmethod
    def create_graph_from_onnx_model(onnx_model_proto):
        """Create Graph loading onnx model proto."""
        # apply shape inference on the model
        inferred_model = shape_inference.infer_shapes(onnx_model_proto)
        graph_proto = inferred_model.graph

        # split opset imports into the default onnx opset and extra domains
        opset_version = None
        extra_opset = []
        for opset in onnx_model_proto.opset_import:
            if not opset.domain:
                # domain field is None or empty means it is onnx domain
                opset_version = opset.version
            else:
                extra_opset.append(opset)

        utils.make_sure(opset_version is not None, "opset version is not specified for onnx domain")
        main_graph = GraphUtil.create_graph_from_onnx_graph(graph_proto, opset_version, extra_opset)
        return main_graph

    @staticmethod
    def create_graph_from_onnx_graph(graph_proto, opset_version=None, extra_opset=None):
        """Create Graph loading onnx graph proto."""
        output_shapes = {}
        output_dtypes = {}

        # shapes/dtypes from intermediate value_info first ...
        shapes, dtypes = GraphUtil._parse_shape_and_type_from_value_infos(graph_proto.value_info)
        output_shapes.update(shapes)
        output_dtypes.update(dtypes)

        # ... then graph outputs (may overwrite value_info entries)
        shapes, dtypes = GraphUtil._parse_shape_and_type_from_value_infos(graph_proto.output)
        output_shapes.update(shapes)
        output_dtypes.update(dtypes)

        nodes_to_append = []
        for n in graph_proto.node:
            if n.op_type == "Constant":
                # internally tf2onnx uses "Const" for constant nodes
                n.op_type = "Const"

            # some pytorch model had empty names - make one up
            if not n.name:
                n.name = utils.make_name("was_empty")
            nodes_to_append.append(n)

        output_names = []
        for n in graph_proto.output:
            output_names.append(n.name)

        g = Graph(nodes_to_append, output_shapes, output_dtypes, None, opset_version, extra_opset, None, output_names)
        const_nodes = GraphUtil._parse_graph_initializer(g, graph_proto)
        GraphUtil._parse_graph_input(g, graph_proto, [n.name for n in const_nodes])

        # recursively load body graphs stored in graph-valued attributes (Loop/If/Scan)
        for n in g.get_nodes():
            for attr_name, attr_val in n.attr.items():
                if attr_val.HasField('g'):
                    # it was assumed that the attr_val.g has inferred shapes/dtypes.
                    sub_g = GraphUtil.create_graph_from_onnx_graph(attr_val.g, opset_version, extra_opset)
                    n.set_body_graph_as_attr(attr_name, sub_g)
        return g

    @staticmethod
    def get_node_count_from_onnx_graph(graph_proto):
        """Return a Counter of op types in graph_proto (top level only)."""
        op_cnt = collections.Counter()
        for n in graph_proto.node:
            op_cnt[n.op_type] += 1
        return op_cnt

    @staticmethod
    def _parse_shape_and_type_from_value_infos(value_infos):
        """Get nodes output shapes and types from value infos."""
        output_shapes = {}
        output_dtypes = {}
        for shape_info in value_infos:
            type_proto = shape_info.type
            elem_type = type_proto.tensor_type.elem_type
            shape = type_proto.tensor_type.shape
            tuned_shape = []
            for d in shape.dim:
                if d.HasField('dim_param'):
                    # symbolic dimension -> unknown, encoded as -1
                    tuned_shape.append(-1)
                elif d.HasField('dim_value'):
                    tuned_shape.append(d.dim_value)
                else:
                    # it is found, some unknown dims is missing after inference.
                    tuned_shape.append(-1)
            output_shapes[shape_info.name] = tuned_shape
            output_dtypes[shape_info.name] = elem_type

        return output_shapes, output_dtypes

    @staticmethod
    def _parse_graph_initializer(g, graph_proto):
        """Get graph initializers and put into Graph object."""
        const_nodes = []
        for initializer in graph_proto.initializer:
            np_val = numpy_helper.to_array(initializer)
            const_nodes.append(g.make_const(initializer.name, np_val))

        return const_nodes

    @staticmethod
    def _parse_graph_input(g, graph_proto, const_node_names):
        """Get graph inputs not defined as initializers and put into Graph object."""
        shapes, dtypes = GraphUtil._parse_shape_and_type_from_value_infos(graph_proto.input)
        # make sure the input is added in order we read from graph_proto,
        # because for subgraphs, the input orders matter.
        for graph_input in graph_proto.input:
            name = graph_input.name
            shape = shapes[name]
            dtype = dtypes[name]
            if name not in const_node_names:
                g.add_graph_input(name, dtype, shape)
            else:
                # an input that also has an initializer becomes an input with default
                g.add_graph_input_with_default(name, g.get_node_by_name(name), dtype, shape)
|
lib/python3.10/site-packages/tf2onnx/graph_builder.py
ADDED
|
@@ -0,0 +1,228 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.graph_helper - class to help building graph, such as helping to make complex node
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
from tf2onnx import utils, logging
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# pylint: disable=missing-docstring
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class GraphBuilder(object):
|
| 19 |
+
"""help to build graph"""
|
| 20 |
+
def __init__(self, graph):
    """Wrap *graph* (a tf2onnx Graph) so helpers can emit nodes into it."""
    self._g = graph
|
| 22 |
+
|
| 23 |
+
@property
def graph(self):
    """The wrapped Graph instance nodes are built into."""
    return self._g
|
| 26 |
+
|
| 27 |
+
def make_slice(self, kwargs, name=None, shapes=None, dtypes=None, return_node=False):
    """
    slice changes its schema at opset 10: it treats some attributes as dynamic input
    so this function has to process inputs according to graph's opset version
    to get "inputs" and "attr" to feed "make_node"
    kwargs: key could be ["data", "starts", "ends", "axes", "steps", "outputs"].
    """
    outputs = kwargs.pop("outputs", None)

    if self.graph.opset < 10:
        # "data" is string
        # "starts", "ends" and "axes" are attributes, and "axes" is optional.
        data = kwargs.pop("data")
        starts = self.convert_to_attribute(kwargs.pop("starts"))
        ends = self.convert_to_attribute(kwargs.pop("ends"))
        axes = self.convert_to_attribute(kwargs.pop("axes", None), is_optional=True)
        attr = {"starts": starts, "ends": ends, "axes": axes}
        inputs = [data]
    else:
        # slice-10 has 3 required inputs "data", "starts", "ends"
        # and 2 optional inputs "axes", "steps"
        # input sequence should be "data", "starts", "ends", "axes", "steps"
        attr = {}
        data = kwargs.pop("data")
        starts = self.convert_to_input(kwargs.pop("starts"), "const_starts", dtype=np.int64)
        ends = self.convert_to_input(kwargs.pop("ends"), "const_ends", dtype=np.int64)
        axes = self.convert_to_input(kwargs.pop("axes", None), "const_axes", is_optional=True, dtype=np.int64)
        steps = self.convert_to_input(kwargs.pop("steps", None), "const_steps", is_optional=True, dtype=np.int64)
        inputs = [data, starts, ends, axes, steps]

    # pre-process inputs and attr; any key left in kwargs was not recognized
    utils.make_sure(not kwargs, "kwargs contains un-used key")

    # drop attributes that were not supplied (None)
    new_attr = {}
    for key, val in attr.items():
        if val is not None:
            new_attr[key] = val
    attr = new_attr

    for ind, val in enumerate(inputs):
        if val is None:
            inputs[ind] = utils.ONNX_EMPTY_INPUT  # empty string means no connection in ONNX
    # remove trailing "" (data is always present, so the list never empties)
    while inputs[-1] == utils.ONNX_EMPTY_INPUT:
        inputs = inputs[:-1]

    if self.graph.opset >= 10:
        # starts/ends/axes/steps must all share one dtype per the Slice-10 schema
        dtype = self.graph.get_dtype(inputs[1])
        for input_data in inputs[1:]:
            if input_data != utils.ONNX_EMPTY_INPUT:
                utils.make_sure(dtype == self.graph.get_dtype(input_data), "dtype should be same")

    node = self.graph.make_node(op_type="Slice", inputs=inputs, attr=attr, name=name,
                                outputs=outputs, shapes=shapes, dtypes=dtypes)
    if return_node:
        return node
    return node.output[0]
|
| 84 |
+
|
| 85 |
+
def make_reduce_sum(self, kwargs, name=None, shapes=None, dtypes=None):
    """Build a ReduceSum node, dispatching on the opset-13 schema change.

    From opset 13 on, "axes" is a dynamic input instead of an attribute.
    kwargs may contain: "data", "axes", "keepdims", "noop_with_empty_axes", "outputs".
    Returns the name of the node's first output tensor.
    """
    outputs = kwargs.pop("outputs", None)
    keepdims = kwargs.pop("keepdims", 1)
    noop_with_empty_axes = kwargs.pop("noop_with_empty_axes", 0)

    if self.graph.opset < 13:
        data = kwargs.pop("data")
        axes = self.convert_to_attribute(kwargs.pop("axes", None), is_optional=True)
        # With the default noop behavior, an empty axes list means "reduce all",
        # which pre-13 is expressed by omitting the attribute entirely.
        if noop_with_empty_axes == 0 and axes == []:
            axes = None
        inputs = [data]
        attr = {"axes": axes, "keepdims": keepdims}
    else:
        data = self.convert_to_input(kwargs.pop("data"), "const_data")
        axes = self.convert_to_input(kwargs.pop("axes", None), "const_axes", is_optional=True, dtype=np.int64)
        inputs = [data, axes]
        attr = {"keepdims": keepdims, "noop_with_empty_axes": noop_with_empty_axes}

    utils.make_sure(not kwargs, "kwargs contains un-used key")

    # Drop attributes that were left unset.
    attr = {key: val for key, val in attr.items() if val is not None}

    node = self.graph.make_node(op_type="ReduceSum", inputs=inputs, attr=attr, name=name,
                                outputs=outputs, shapes=shapes, dtypes=dtypes)
    return node.output[0]
|
| 119 |
+
|
| 120 |
+
def make_squeeze(self, kwargs, name=None, shapes=None, dtypes=None, return_node=False, op_name_scope=None):
    """Build a Squeeze node, dispatching on the opset-13 schema change.

    From opset 13 on, "axes" is a dynamic input instead of an attribute.
    kwargs may contain: "data", "axes", "outputs".
    Returns the node itself when return_node is True, otherwise its first output name.
    """
    outputs = kwargs.pop("outputs", None)
    data = kwargs.pop("data")

    if self.graph.opset < 13:
        axes = self.convert_to_attribute(kwargs.pop("axes", None), is_optional=True)
        inputs = [data]
        attr = {"axes": axes}
    else:
        axes = self.convert_to_input(kwargs.pop("axes", None), "const_axes", is_optional=True, dtype=np.int64)
        inputs = [data, axes]
        attr = {}

    utils.make_sure(not kwargs, "kwargs contains un-used key")

    # Drop attributes that were left unset.
    attr = {key: val for key, val in attr.items() if val is not None}

    # Absent optional inputs become the ONNX "no connection" marker (empty string) ...
    inputs = [utils.ONNX_EMPTY_INPUT if val is None else val for val in inputs]
    # ... and trailing unconnected inputs are stripped.
    while inputs[-1] == utils.ONNX_EMPTY_INPUT:
        inputs = inputs[:-1]

    node = self.graph.make_node(op_type="Squeeze", inputs=inputs, attr=attr, name=name,
                                outputs=outputs, shapes=shapes, dtypes=dtypes,
                                op_name_scope=op_name_scope)
    return node if return_node else node.output[0]
|
| 159 |
+
|
| 160 |
+
def make_unsqueeze(self, kwargs, name=None, shapes=None, dtypes=None, return_node=False, op_name_scope=None):
    """Build an Unsqueeze node, dispatching on the opset-13 schema change.

    From opset 13 on, "axes" is a dynamic input instead of an attribute.
    kwargs may contain: "data", "axes", "outputs".
    Returns the node itself when return_node is True, otherwise its first output name.
    """
    outputs = kwargs.pop("outputs", None)
    data = kwargs.pop("data")

    if self.graph.opset < 13:
        axes = self.convert_to_attribute(kwargs.pop("axes", None), is_optional=True)
        inputs = [data]
        attr = {"axes": axes}
    else:
        axes = self.convert_to_input(kwargs.pop("axes", None), "const_axes", is_optional=True, dtype=np.int64)
        inputs = [data, axes]
        attr = {}

    utils.make_sure(not kwargs, "kwargs contains un-used key")

    # Drop attributes that were left unset.
    attr = {key: val for key, val in attr.items() if val is not None}

    # Absent optional inputs become the ONNX "no connection" marker (empty string) ...
    inputs = [utils.ONNX_EMPTY_INPUT if val is None else val for val in inputs]
    # ... and trailing unconnected inputs are stripped.
    while inputs[-1] == utils.ONNX_EMPTY_INPUT:
        inputs = inputs[:-1]

    node = self.graph.make_node(op_type="Unsqueeze", inputs=inputs, attr=attr, name=name,
                                outputs=outputs, shapes=shapes, dtypes=dtypes,
                                op_name_scope=op_name_scope)
    return node if return_node else node.output[0]
|
| 199 |
+
|
| 200 |
+
def convert_to_input(self, tensor, const_name, is_optional=False, dtype=None):
    """In ONNX a dynamic input must come from a node, so it must be a tensor name (str).

    A python list is materialized as a new Const node named after *const_name*;
    a string is passed through unchanged. Returns None for an absent optional input.
    """
    if is_optional and tensor is None:
        return None

    utils.make_sure(tensor is not None, "input is required so it couldn't be None")

    if isinstance(tensor, list):
        const_node = self.graph.make_const(utils.make_name(const_name), np.array(tensor, dtype))
        result = const_node.output[0]
    else:
        result = tensor

    utils.make_sure(isinstance(result, str), "input is a dynamic input, so a str is needed")
    return result
|
| 214 |
+
|
| 215 |
+
def convert_to_attribute(self, tensor, is_optional=False):
    """Coerce *tensor* into an attribute value (a python list).

    A tensor name (str) is resolved to its producing const node's value;
    a list is passed through unchanged. Returns None for an absent optional value.
    """
    if is_optional and tensor is None:
        return None

    utils.make_sure(tensor is not None, "input is required so it couldn't be None")

    if isinstance(tensor, str):
        producer = self.graph.get_node_by_output(tensor)
        result = producer.get_tensor_value(as_list=True)
    else:
        result = tensor

    utils.make_sure(isinstance(result, list), "input is an attr, so a list is needed")
    return result
|
lib/python3.10/site-packages/tf2onnx/graph_matcher.py
ADDED
|
@@ -0,0 +1,277 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
# ==============================================================================
|
| 17 |
+
"""Utilities that match patterns in a tf.Graph."""
|
| 18 |
+
|
| 19 |
+
from __future__ import absolute_import
|
| 20 |
+
from __future__ import division
|
| 21 |
+
from __future__ import print_function
|
| 22 |
+
from __future__ import unicode_literals
|
| 23 |
+
|
| 24 |
+
from itertools import permutations
|
| 25 |
+
import six
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class OpTypePattern(object):
    """A tree pattern that matches TF expressions with certain op types."""

    def __init__(self, op_type, name=None, inputs=None, allow_reorder=None):
        """Initializes an OpTypePattern.

        Args:
          op_type: string that specifies the allowed types of the root. It can be
            (1) an op type, e.g. 'Conv2D',
            (2) '*', i.e. wildcard, or
            (3) multiple op types separated by '|', e.g., 'Relu|Relu6'.
          name: Optional string. The name of the pattern that can be looked up in
            MatchResult.
          inputs: Optional list of `OpTypePattern`s or strings that specify the
            patterns for the inputs of a matching op. If None, any inputs are
            accepted.
          allow_reorder: Optional boolean that overrides allow_reorder in
            GraphMatcher for this pattern's immediate inputs.
        """
        self._op_type = op_type
        self._name = name
        self.allow_reorder = allow_reorder
        # Plain strings are promoted to leaf patterns.
        self._inputs = [
            child if isinstance(child, OpTypePattern) else OpTypePattern(child)
            for child in (inputs or [])
        ]
        # Pre-split the '|'-separated alternatives for O(1) membership tests.
        self.op_type_set = set(op_type.split('|')) if op_type else set()

    @property
    def op_type(self):
        return self._op_type

    @property
    def inputs(self):
        return self._inputs

    @property
    def name(self):
        return self._name
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class MatchResult(object):
    r"""Encapsulates the result of a match done by GraphMatcher.

    Maps each OpTypePattern to the op (and output tensor) it matched. When a
    matching op has several output tensors, the recorded tensor is the one
    actually consumed by the matching op of the parent pattern — e.g. for a
    `split` feeding `+` through its second output `y1`, the tensor bound to the
    pattern that matched `split` is `y1`, not `y0`.
    """

    def __init__(self):
        self._pattern_to_op_tensor = {}
        self._name_to_pattern = {}

    def add(self, pattern, op, tensor):
        self._pattern_to_op_tensor[pattern] = op, tensor
        if pattern.name is not None:
            # Re-binding an existing name is deliberately allowed so the same
            # subgraph pattern can be applied multiple times.
            self._name_to_pattern[pattern.name] = pattern

    def _to_pattern(self, pattern_or_name):
        if isinstance(pattern_or_name, OpTypePattern):
            return pattern_or_name
        if isinstance(pattern_or_name, six.text_type):
            return self._name_to_pattern.get(pattern_or_name)
        raise ValueError('pattern_or_name has type %s. Expect OpTypePattern or str.'
                         % type(pattern_or_name))

    def get_op(self, pattern_or_name, default=None):
        """Return the matched op, or *default* when no binding exists."""
        found = self._pattern_to_op_tensor.get(self._to_pattern(pattern_or_name))
        return found[0] if found else default

    def get_tensor(self, pattern_or_name, default=None):
        """Return the matched tensor, or *default* when no binding exists."""
        found = self._pattern_to_op_tensor.get(self._to_pattern(pattern_or_name))
        return found[1] if found else default

    def get_nodes(self):
        return [op for op, _ in self._pattern_to_op_tensor.values()]
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
class GraphMatcher(object):
    """Checks if a particular subgraph matches a given pattern."""

    def __init__(self, pattern, allow_reorder=False):
        """Initializes a GraphMatcher.

        Args:
          pattern: The `OpTypePattern` against which `GraphMatcher` matches
            subgraphs.
          allow_reorder: Default for trying every permutation of a pattern's
            inputs; can be overridden per pattern.
        """
        self._pattern = pattern
        self._allow_reorder = allow_reorder

    @staticmethod
    def _is_op_type_same(op, pattern):
        # '*' is a wildcard; otherwise the op type must be one of the
        # '|'-separated alternatives.
        return pattern.op_type == "*" or op.type in pattern.op_type_set

    def _match_pattern(self, pattern, op, tensor):
        """Returns whether a TF expression rooted at `op` matches `pattern`.

        Args:
          pattern: An `OpTypePattern`.
          op: A `tf.Operation` to match against the pattern.
          tensor: the output `tf.Tensor` of `op` that is used by the matching op
            of `pattern`'s parent. Can be None if `pattern` is the pattern root.

        Returns:
          (True, match_list) on a match, where each match_list element is
          [pattern, op, tensor]; (False, partial match_list) otherwise.
          An op matches when (1) its type satisfies `pattern.op_type` (None or
          '*' match anything) and (2) its inputs recursively match
          `pattern.inputs` (an empty pattern input list accepts any inputs).
        """
        matches = []
        if pattern.op_type is None:
            return True, matches

        if not self._is_op_type_same(op, pattern):
            return False, matches
        matches.append([pattern, op, tensor])

        if not pattern.inputs:
            # No input constraints: accept whatever feeds this op.
            return True, matches

        if not op or len(op.inputs) != len(pattern.inputs):
            return False, matches

        reorder = pattern.allow_reorder
        if reorder is None:
            reorder = self._allow_reorder
        candidate_orders = permutations(pattern.inputs) if reorder else [pattern.inputs]

        for ordered_patterns in candidate_orders:
            all_ok = True
            child_matches = []
            for child_tensor, child_pattern in zip(op.inputs, ordered_patterns):
                ok, sub = self._match_pattern(child_pattern, child_tensor, child_tensor)
                all_ok = all_ok and ok
                child_matches.extend(sub)
            if all_ok:
                matches.extend(child_matches)
                return True, matches
        return False, matches

    def _parse_match_list_to_match_result(self, match_list):
        for pattern, op, tensor in match_list:
            self._match_result.add(pattern, op, tensor)

    def match_op(self, op):
        """Matches `op` against `self._pattern`.

        Args:
          op: `tf.Operation` to match against the pattern.

        Returns:
          A `MatchResult` if `op` matches the pattern; otherwise None.
        """
        self._match_result = MatchResult()
        matched, match_list = self._match_pattern(self._pattern, op, tensor=None)
        if not matched:
            return None
        self._parse_match_list_to_match_result(match_list)
        return self._match_result

    def match_ops(self, ops):
        """Matches each operation in `ops` against `self._pattern`.

        Args:
          ops: collection of `tf.Operation` to match against the pattern.

        Yields:
          `MatchResult` for each `tf.Operation` that matches the pattern.
        """
        for candidate in ops:
            result = self.match_op(candidate)
            if result:
                yield result

    def match_graph(self, graph):
        """Matches each operation in `graph` against `self._pattern`.

        Args:
          graph: `tf.Graph` containing operations to match.

        Yields:
          `MatchResult` for each matching `tf.Operation` in `graph`.
        """
        for result in self.match_ops(graph.get_operations()):
            yield result
|
lib/python3.10/site-packages/tf2onnx/handler.py
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""Opset registry."""
|
| 5 |
+
|
| 6 |
+
from __future__ import absolute_import
|
| 7 |
+
from __future__ import division
|
| 8 |
+
from __future__ import print_function
|
| 9 |
+
from __future__ import unicode_literals
|
| 10 |
+
|
| 11 |
+
import collections
|
| 12 |
+
import inspect
|
| 13 |
+
|
| 14 |
+
from tf2onnx import constants
|
| 15 |
+
|
| 16 |
+
# pylint: disable=unused-argument,missing-docstring,invalid-name
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class tf_op:
    """Class to implement the decorator to register handlers that map tf to onnx."""

    # Maps domains (string) to lists (idx represents opset) of dicts (key = op to handle, value = handler)
    _OPSETS = collections.OrderedDict()
    # Cache of mapping for current domain and opset. Maps op names to handlers [(func, kwargs) tuple]
    _MAPPING = None
    # Cache of mapping from domain to map of op name to handlers. Used to fetch handlers from different domains
    _DOMAIN_MAPPING = None

    def __init__(self, name, domain=constants.ONNX_DOMAIN, **kwargs):
        """Called decorator from decorator.

        :param name: The name (or list of names) of the tensorflow operator.
        :param domain: The domain the handler requires, defaults to onnx.
        :param kwargs: Dictionary that are passed to the handler. A key 'onnx_op' will change the operator name.
        """
        # Normalize the single-name form to a list so registration is uniform.
        if not isinstance(name, list):
            name = [name]
        self.names = name
        self.domain = domain
        self.kwargs = kwargs

    def __call__(self, func):
        # Register every method named "version_<N>" on the decorated class as
        # the handler for opset N of each declared op name.
        for k, v in inspect.getmembers(func, inspect.ismethod):
            if k.startswith("version_"):
                version = int(k.replace("version_", ""))
                tf_op.register_handler(v, version, self.names, self.domain, self.kwargs)
        return func

    def register_compat_handler(self, func, version):
        """Register old style custom handler.

        :param func: The handler.
        :param version: The version of the handler.
        """
        tf_op.register_handler(func, version, self.names, self.domain, self.kwargs)

    @staticmethod
    def register_handler(func, version, names, domain, kwargs):
        """Register handler.

        :param func: The handler.
        :param version: (int) The opset of onnx (or other domain) required for the handler.
        :param names: List of names of the operators to convert.
        :param domain: The domain the handler requires, defaults to onnx.

        """
        opset = tf_op._OPSETS.get(domain)
        if not opset:
            opset = []
            tf_op._OPSETS[domain] = opset
        # Grow the per-domain list with empty dicts until index `version` exists.
        while version >= len(opset):
            opset.append({})
        opset_dict = opset[version]
        for name in names:
            opset_dict[name] = (func, kwargs)

    @staticmethod
    def get_opsets():
        # Expose the raw domain -> [opset dicts] registry.
        return tf_op._OPSETS

    @staticmethod
    def create_mapping(max_onnx_opset_version, extra_opsets):
        """Create the final mapping dictionary by stacking domains and opset versions.

        :param max_onnx_opset_version: The highest onnx opset the resulting graph may use.
        :param extra_opsets: Extra opsets the resulting graph may use.
        """
        mapping = {constants.ONNX_DOMAIN: max_onnx_opset_version}
        if extra_opsets:
            for extra_opset in extra_opsets:
                mapping[extra_opset.domain] = extra_opset.version
        ops_mapping = {}
        domain_to_ops_mapping = collections.defaultdict(dict)
        # Walk opset versions in increasing order so later (higher-opset)
        # handlers override earlier ones in ops_mapping.
        for domain, opsets in tf_op.get_opsets().items():
            for target_opset, op_map in enumerate(opsets):
                m = mapping.get(domain)
                if m:
                    if target_opset <= m and op_map:
                        # NOTE(review): this folds the *cumulative* ops_mapping
                        # (which can include other domains and lags the current
                        # op_map) into the per-domain map, rather than op_map
                        # itself — looks suspicious; confirm against upstream
                        # tf2onnx before relying on _DOMAIN_MAPPING contents.
                        domain_to_ops_mapping[domain].update(ops_mapping)
                        ops_mapping.update(op_map)

        tf_op._MAPPING = ops_mapping
        tf_op._DOMAIN_MAPPING = domain_to_ops_mapping
        return ops_mapping

    @staticmethod
    def find_effective_op(name, domain=None):
        """Find the effective version of an op create_mapping.
        This is used if we need to compose ops from other ops where we'd need to find the
        op that is going to be used in the final graph, for example there is a custom op
        that overrides a onnx op ...

        :param name: The operator name.
        :param domain: The domain to use (optional).
        """
        # Requires create_mapping() to have populated the caches first.
        if domain is None:
            map_info = tf_op._MAPPING.get(name)
        else:
            map_info = tf_op._DOMAIN_MAPPING[domain].get(name)
        if map_info is None:
            return None
        return map_info
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
class tfl_op:
    """Class to implement the decorator to register handlers that map tflite to tf or onnx."""

    def __init__(self, name, domain=constants.ONNX_DOMAIN, **kwargs):
        """Called decorator from decorator.

        :param name: The name (or list of names) of the tflite operator.
        :param domain: The domain the operator belongs to, defaults to onnx. Use 'com.google.tensorflow' for tflite->tf
        :param kwargs: Dictionary that are passed to the handler. A key 'onnx_op' will change the operator name.
            'tf_op' will convert the op to tf during a tflite to tf conversion pass.
        """
        # Normalize the single-name form to a list so registration is uniform.
        self.names = name if isinstance(name, list) else [name]
        self.domain = domain
        self.kwargs = kwargs

    def __call__(self, func):
        # Delegate registration of any 'version_#' handlers to the tf_op machinery.
        tf_op(self.names, self.domain, **self.kwargs)(func)
        # TFLite->TF handlers are exposed as a 'to_tf' function (which takes the
        # optional 'tf_op' kwarg) and live in the tensorflow domain at slot 0.
        if hasattr(func, 'to_tf'):
            tf_op.register_handler(func.to_tf, 0, self.names, 'com.google.tensorflow', self.kwargs)
        return func

    @staticmethod
    def create_tfl_to_tf_mapping():
        # Opset slot 0 of the tensorflow domain holds all tflite->tf handlers.
        return tf_op.get_opsets()['com.google.tensorflow'][0]
|
lib/python3.10/site-packages/tf2onnx/rewriter/__init__.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
"""tf2onnx.rewriter module."""
|
| 4 |
+
|
| 5 |
+
from __future__ import division
|
| 6 |
+
from __future__ import print_function
|
| 7 |
+
from __future__ import unicode_literals
|
| 8 |
+
|
| 9 |
+
from tf2onnx.rewriter.cond_rewriter import rewrite_cond
|
| 10 |
+
from tf2onnx.rewriter.conv2d_with_pad_rewriter import rewrite_conv2d_with_pad
|
| 11 |
+
from tf2onnx.rewriter.dropout_rewriter import rewrite_dropout
|
| 12 |
+
from tf2onnx.rewriter.eye_rewriter import rewrite_eye
|
| 13 |
+
from tf2onnx.rewriter.flatten_rewriter import rewrite_flatten
|
| 14 |
+
from tf2onnx.rewriter.gemm_rewriter import rewrite_gemm
|
| 15 |
+
from tf2onnx.rewriter.leakyrelu_rewriter import rewrite_leakyrelu
|
| 16 |
+
from tf2onnx.rewriter.random_normal_rewriter import rewrite_random_normal
|
| 17 |
+
from tf2onnx.rewriter.random_uniform import rewrite_random_uniform, rewrite_random_uniform_fold_const
|
| 18 |
+
from tf2onnx.rewriter.rnn import rewrite_single_direction_lstm, rewrite_bi_direction_lstm, \
|
| 19 |
+
rewrite_single_direction_gru, rewrite_bi_direction_gru, \
|
| 20 |
+
rewrite_custom_rnn_cell, rewrite_generic_loop
|
| 21 |
+
from tf2onnx.rewriter.thresholded_relu_rewriter import rewrite_thresholded_relu
|
| 22 |
+
from tf2onnx.rewriter.transpose_rewriter import rewrite_transpose
|
| 23 |
+
from tf2onnx.rewriter.conv2d_with_add_rewriter import rewrite_biasadd_with_conv2d
|
| 24 |
+
from tf2onnx.rewriter.quantization_ops_rewriter import rewrite_quantize_and_dequantize
|
| 25 |
+
from tf2onnx.rewriter.layer_normalization_rewriter import rewrite_layer_normalization
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
__all__ = [
|
| 29 |
+
"rewrite_cond",
|
| 30 |
+
"rewrite_conv2d_with_pad",
|
| 31 |
+
"rewrite_dropout",
|
| 32 |
+
"rewrite_eye",
|
| 33 |
+
"rewrite_flatten",
|
| 34 |
+
"rewrite_gemm",
|
| 35 |
+
"rewrite_leakyrelu",
|
| 36 |
+
"rewrite_random_normal",
|
| 37 |
+
"rewrite_random_uniform",
|
| 38 |
+
"rewrite_random_uniform_fold_const",
|
| 39 |
+
"rewrite_thresholded_relu",
|
| 40 |
+
"rewrite_transpose",
|
| 41 |
+
"rewrite_single_direction_lstm",
|
| 42 |
+
"rewrite_bi_direction_lstm",
|
| 43 |
+
"rewrite_single_direction_gru",
|
| 44 |
+
"rewrite_bi_direction_gru",
|
| 45 |
+
"rewrite_custom_rnn_cell",
|
| 46 |
+
"rewrite_generic_loop",
|
| 47 |
+
"rewrite_biasadd_with_conv2d",
|
| 48 |
+
"rewrite_quantize_and_dequantize",
|
| 49 |
+
"rewrite_layer_normalization"
|
| 50 |
+
]
|
lib/python3.10/site-packages/tf2onnx/rewriter/bigru_rewriter.py
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter.bigru_rewriter - bigru support.
|
| 6 |
+
This rewriter depends on tf2onnx.rewriter.gru_rewriter's results.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from __future__ import division
|
| 10 |
+
from __future__ import print_function
|
| 11 |
+
from __future__ import unicode_literals
|
| 12 |
+
|
| 13 |
+
import logging
|
| 14 |
+
import numpy as np
|
| 15 |
+
from tf2onnx import utils
|
| 16 |
+
from tf2onnx.rewriter import rnn_utils
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
logger = logging.getLogger(__name__)
|
| 20 |
+
|
| 21 |
+
# pylint: disable=invalid-name,unused-argument,missing-docstring
|
| 22 |
+
|
| 23 |
+
def process_bigru(g, bi_grus):
    """Fuse each (forward, backward) GRU pair into one bidirectional ONNX GRU node.

    For every pair, the per-direction W/R/B constants are stacked along the
    num_directions axis, a single GRU node with direction="bidirectional" is
    created, the original nodes are removed, and consumers of the original
    outputs are re-wired through slices of the fused node's outputs.
    Returns the graph's node list after rewriting.
    """
    for gru_fw, gru_bw in bi_grus:
        logger.debug("=========================")
        logger.debug("start handling potential bidirectional gru: %s, %s", gru_fw.name, gru_bw.name)

        # Inputs 1/2/3 of an ONNX GRU are W, R, B; stack fw then bw on axis 0
        # (the num_directions axis).
        W = np.concatenate((rnn_utils.get_np_val_for_const(g, gru_fw, 1),
                            rnn_utils.get_np_val_for_const(g, gru_bw, 1)), axis=0)
        R = np.concatenate((rnn_utils.get_np_val_for_const(g, gru_fw, 2),
                            rnn_utils.get_np_val_for_const(g, gru_bw, 2)), axis=0)
        B = np.concatenate((rnn_utils.get_np_val_for_const(g, gru_fw, 3),
                            rnn_utils.get_np_val_for_const(g, gru_bw, 3)), axis=0)

        all_nodes = g.get_nodes()
        if len(gru_fw.inputs) == len(gru_bw.inputs):
            if len(gru_fw.inputs) > 4:
                # both directions carry an initial-state input; merge them
                initializer_node = process_init_nodes(g, gru_fw, gru_bw, all_nodes)
        else:
            logger.error("fw, bw gru inputs num is not consistent. stop")
            continue

        # create the stacked weight constants
        w_node = g.make_const(utils.make_name("W"), W, skip_conversion=True)
        all_nodes.append(w_node)
        r_node = g.make_const(utils.make_name("R"), R, skip_conversion=True)
        all_nodes.append(r_node)
        b_node = g.make_const(utils.make_name("B"), B, skip_conversion=True)
        all_nodes.append(b_node)

        gru_inputs = [gru_fw.input[0], w_node.output[0],
                      r_node.output[0], b_node.output[0]]
        if len(gru_fw.inputs) > 4:
            gru_inputs.extend([gru_fw.input[4], initializer_node.output[0]])

        # copy over GRU attributes from the forward node
        attr = {}
        for name in rnn_utils.onnx_rnn_attr_mapping[rnn_utils.ONNX_RNN_TYPE.GRU]:
            attr_val = gru_fw.get_attr_value(name)
            if attr_val:
                attr[name] = attr_val
        # activations come back as byte strings from the proto; decode and
        # concatenate fw activations followed by bw activations.
        activations = [act.decode("utf-8") for act in gru_fw.get_attr_value("activations")]
        activations += [act.decode("utf-8") for act in gru_bw.get_attr_value("activations")]
        attr.update({"direction": "bidirectional", "activations": activations})

        bi_gru_node = g.make_node("GRU", gru_inputs, attr=attr, output_count=2)
        all_nodes.append(bi_gru_node)
        logger.debug("processing output nodes")

        to_remove = [gru_fw.name, gru_fw.input[1], gru_fw.input[2], gru_fw.input[3],
                     gru_bw.name, gru_bw.input[1], gru_bw.input[2], gru_bw.input[3]]
        # redirect consumers of both original GRU outputs (Y and Y_h)
        for out_index in (0, 1):
            rnn_utils.slice_birnn_for_original_rnn_consumers(
                g, gru_fw, gru_bw, bi_gru_node, out_index, all_nodes, to_remove)

        # remember the bw data input before removal so the reverse op feeding
        # it can be dropped afterwards
        gru_bw_old_x = gru_bw.input[0]
        for n in to_remove:
            g.remove_node(n)

        rnn_utils.remove_reverse_in_bw_input(g, gru_bw_old_x, rnn_utils.ONNX_RNN_TYPE.GRU)

    return g.get_nodes()
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def process_init_nodes(g, gru_fw, gru_bw, to_append):
    """Merge the fw/bw initial-state inputs (input index 5) into one node.

    Delegates to rnn_utils.process_single_init_node; newly created nodes are
    appended to *to_append*. Returns the merged initializer node.
    """
    return rnn_utils.process_single_init_node(
        g, gru_fw.input[5], gru_bw.input[5], to_append)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def rewrite_bidirectional_grus(g, ops):
    """Entry point: locate fw/bw GRU pairs and fuse them into bidirectional GRUs."""
    candidate_pairs = rnn_utils.find_bidirectional_rnns(g, ops, rnn_utils.ONNX_RNN_TYPE.GRU)
    return process_bigru(g, candidate_pairs)
|
lib/python3.10/site-packages/tf2onnx/rewriter/bilstm_rewriter.py
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter.bilstm_rewriter - bilstm support.
|
| 6 |
+
This rewriter depends on tf2onnx.rewriter.lstm_rewriter's results.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from __future__ import division
|
| 10 |
+
from __future__ import print_function
|
| 11 |
+
from __future__ import unicode_literals
|
| 12 |
+
|
| 13 |
+
import logging
|
| 14 |
+
import numpy as np
|
| 15 |
+
from tf2onnx import utils
|
| 16 |
+
from tf2onnx.rewriter import rnn_utils
|
| 17 |
+
|
| 18 |
+
logger = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
+
# pylint: disable=invalid-name,unused-argument,missing-docstring
|
| 21 |
+
|
| 22 |
+
def process_bilstm(g, bi_lstms):
    """Fuse each (forward, backward) LSTM pair into one bidirectional ONNX LSTM node.

    Mirrors process_bigru in the GRU rewriter: W/R/B constants are stacked on
    the num_directions axis, a single LSTM with direction="bidirectional" is
    emitted, and consumers of the three original outputs (Y, Y_h, Y_c) are
    re-wired through slices of the fused node. Returns the node list.
    """
    for lstm_fw, lstm_bw in bi_lstms:
        logger.debug("=========================")
        logger.debug("start handling potential bidirectional lstm: %s, %s", lstm_fw.name, lstm_bw.name)

        # Inputs 1/2/3 of an ONNX LSTM are W, R, B; stack fw then bw on axis 0.
        W = np.concatenate((rnn_utils.get_np_val_for_const(g, lstm_fw, 1),
                            rnn_utils.get_np_val_for_const(g, lstm_bw, 1)), axis=0)
        R = np.concatenate((rnn_utils.get_np_val_for_const(g, lstm_fw, 2),
                            rnn_utils.get_np_val_for_const(g, lstm_bw, 2)), axis=0)
        B = np.concatenate((rnn_utils.get_np_val_for_const(g, lstm_fw, 3),
                            rnn_utils.get_np_val_for_const(g, lstm_bw, 3)), axis=0)

        all_nodes = g.get_nodes()
        if len(lstm_fw.inputs) == len(lstm_bw.inputs):
            if len(lstm_fw.inputs) > 4:
                # merge the initial hidden and cell state inputs
                h_node, c_node = process_ch_init_nodes(g, lstm_fw, lstm_bw, all_nodes)
        else:
            logger.error("fw, bw lstm inputs num is not consistent. stop")
            continue

        # create the stacked weight constants
        w_node = g.make_const(utils.make_name("W"), W, skip_conversion=True)
        all_nodes.append(w_node)
        r_node = g.make_const(utils.make_name("R"), R, skip_conversion=True)
        all_nodes.append(r_node)
        b_node = g.make_const(utils.make_name("B"), B, skip_conversion=True)
        all_nodes.append(b_node)

        lstm_inputs = [lstm_fw.input[0], w_node.output[0], r_node.output[0], b_node.output[0]]
        if len(lstm_fw.inputs) > 4:
            lstm_inputs.extend([lstm_fw.input[4], h_node.output[0], c_node.output[0]])

        # copy LSTM attributes from the forward node on top of the direction
        attr = {"direction": "bidirectional"}
        for name in rnn_utils.onnx_rnn_attr_mapping[rnn_utils.ONNX_RNN_TYPE.LSTM]:
            attr_val = lstm_fw.get_attr_value(name)
            if attr_val:
                attr[name] = attr_val

        bi_lstm_node = g.make_node("LSTM", lstm_inputs, attr=attr, output_count=3)
        all_nodes.append(bi_lstm_node)
        logger.debug("processing output nodes")

        to_remove = [lstm_fw.name, lstm_fw.input[1], lstm_fw.input[2], lstm_fw.input[3],
                     lstm_bw.name, lstm_bw.input[1], lstm_bw.input[2], lstm_bw.input[3]]
        # redirect consumers of the three original LSTM outputs
        for out_index in (0, 1, 2):
            rnn_utils.slice_birnn_for_original_rnn_consumers(
                g, lstm_fw, lstm_bw, bi_lstm_node, out_index, all_nodes, to_remove
            )

        # remember the bw data input before removal so the reverse op feeding
        # it can be dropped afterwards
        lstm_bw_old_x = lstm_bw.input[0]
        for n in to_remove:
            g.remove_node(n)

        rnn_utils.remove_reverse_in_bw_input(g, lstm_bw_old_x, rnn_utils.ONNX_RNN_TYPE.LSTM)

    return g.get_nodes()
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def process_ch_init_nodes(g, lstm_fw, lstm_bw, to_append):
    """Merge the fw/bw initial hidden (input 5) and cell (input 6) states.

    Returns the pair (h_node, c_node); created nodes go into *to_append*.
    """
    h_node = rnn_utils.process_single_init_node(g, lstm_fw.input[5], lstm_bw.input[5], to_append)
    c_node = rnn_utils.process_single_init_node(g, lstm_fw.input[6], lstm_bw.input[6], to_append)
    return h_node, c_node
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def rewrite_bidirectional_lstms(g, ops):
    """Entry point: locate fw/bw LSTM pairs and fuse them into bidirectional LSTMs."""
    candidate_pairs = rnn_utils.find_bidirectional_rnns(g, ops, rnn_utils.ONNX_RNN_TYPE.LSTM)
    return process_bilstm(g, candidate_pairs)
|
lib/python3.10/site-packages/tf2onnx/rewriter/cond_rewriter.py
ADDED
|
@@ -0,0 +1,320 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter.cond_rewriter
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
import logging
|
| 11 |
+
import traceback
|
| 12 |
+
from collections import OrderedDict
|
| 13 |
+
from enum import Enum
|
| 14 |
+
from tf2onnx import utils
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# pylint: disable=missing-docstring,unused-argument,broad-except
|
| 21 |
+
|
| 22 |
+
class BranchType(Enum):
    """Type of branch"""
    TRUE = 1
    FALSE = 2
    # TODO: sometimes, the branch depends on control inputs,
    # so we just set it unknown
    UNKNOWN = 3
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class CondBranchContext:
    """Context for each branch graph"""

    def __init__(self):
        # ordered branch output tensor names, one per Merge node
        self.output = []
        # set of nodes belonging to this branch subgraph
        self.nodes = set()
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class CondContext:
    """Aggregated state for one tf.cond: scope, predicate, branches, switches, merges."""

    def __init__(self, cond_scope, pred_input, true_branch_context,
                 false_branch_context, switchs, merges):
        self.cond_scope = cond_scope  # name scope for this tf.cond
        self.pred_input = pred_input  # condition input
        self.true_branch_context = true_branch_context
        self.false_branch_context = false_branch_context
        self.switchs = set(switchs)
        self.merges = merges  # list of merges in order
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
class CondRewriter:
    """Rewrite TensorFlow tf.cond subgraphs (Switch/Merge) into ONNX If nodes."""

    def __init__(self, g):
        # g: graph under rewrite (project Graph type)
        self.g = g

    def rewrite(self):
        logger.debug("enter cond pre rewrite")
        return self.run()

    def run(self):
        """tf.cond rewriter"""
        # parse tf.cond in topological sort order.
        # NOTE: we assume the current graph is a DAG.
        name_scope_merges = OrderedDict()
        self.g.topological_sort(self.g.get_nodes())
        all_nodes = self.g.get_nodes()
        # group Merge nodes by their tf name scope; each scope is one tf.cond
        for node in all_nodes:
            if self._is_merge(node):
                scope = utils.tf_name_scope(node.name)
                name_scope_merges.setdefault(scope, []).append(node)
        # check if need rewrite
        if not name_scope_merges.keys():
            return all_nodes

        for name_scope, merge_nodes in name_scope_merges.items():
            cond_context = None
            try:
                pred_input, true_branch_context, false_branch_context, switchs = \
                    self._parse_cond(name_scope, merge_nodes)
                cond_context = CondContext(
                    name_scope,
                    pred_input,
                    true_branch_context,
                    false_branch_context,
                    switchs,
                    merge_nodes
                )
            except Exception as ex:
                # parsing is best-effort: log and leave this cond untouched
                tb = traceback.format_exc()
                logger.warning("tf.cond rewrite failed, due to exception: %s, details:%s", ex, tb)
                continue

            self._cut_off_connection(cond_context)
            self._create_if_node(cond_context)
            # remove nodes in If branches explicitly
            branch_nodes = list(cond_context.true_branch_context.nodes) + \
                list(cond_context.false_branch_context.nodes)
            for node in branch_nodes:
                self.g.remove_node(node.name)
        logger.debug("cond pre rewrite done")

        return self.g.get_nodes()

    def _get_output_shape_dtype(self, cond_context):
        """Collect per-output shapes and dtypes; branch ranks and dtypes must agree."""
        output_shapes = []
        output_dtypes = []
        for true_output, false_output in zip(cond_context.true_branch_context.output,
                                             cond_context.false_branch_context.output):
            true_shape = self.g.get_shape(true_output)
            utils.make_sure(true_shape is not None, "Shape of {} is None".format(true_output))
            true_dtype = self.g.get_dtype(true_output)
            false_shape = self.g.get_shape(false_output)
            utils.make_sure(false_shape is not None, "Shape of {} is None".format(false_output))
            false_dtype = self.g.get_dtype(false_output)
            # just require rank is equal
            if len(true_shape) != len(false_shape):
                raise RuntimeError(
                    "the rank of outputs {} and {} mismatch: {}, {}".format(
                        true_output,
                        false_output,
                        len(true_shape),
                        len(false_shape)
                    )
                )
            if true_dtype != false_dtype:
                raise RuntimeError(
                    "the dtype of outputs {} and {} mismatch: {}, {}".format(
                        true_output,
                        false_output,
                        true_dtype,
                        false_dtype
                    )
                )
            # exact dims may differ between branches, so loosen them
            output_shapes.append(utils.create_vague_shape_like(true_shape))
            output_dtypes.append(true_dtype)
        return output_shapes, output_dtypes

    def _create_if_node(self, cond_context):
        """Build the ONNX If node whose outputs reuse the Merge output names."""
        output_shapes, output_dtypes = self._get_output_shape_dtype(cond_context)
        true_graph = utils.construct_graph_from_nodes(
            self.g,
            list(cond_context.true_branch_context.nodes),
            cond_context.true_branch_context.output,
            output_shapes,
            output_dtypes
        )
        false_graph = utils.construct_graph_from_nodes(
            self.g,
            list(cond_context.false_branch_context.nodes),
            cond_context.false_branch_context.output,
            output_shapes,
            output_dtypes
        )
        branches = {"then_branch": true_graph, "else_branch": false_graph}
        if_node = self.g.make_node(
            "If",
            [cond_context.pred_input],
            op_name_scope=cond_context.cond_scope,
            outputs=[m.output[0] for m in cond_context.merges],
            shapes=output_shapes,
            dtypes=output_dtypes,
            skip_conversion=False,
            branches=branches
        )
        return if_node

    def _cut_off_connection(self, cond_context):
        """Cut off switchs and merges, all changes are based on the origin graph"""
        nodes_to_add = []
        logger.debug("cut off switch connection")
        # replace switch with identity node
        for switch in cond_context.switchs:
            shapes = switch.output_shapes
            dtypes = switch.output_dtypes
            self.g.remove_node(switch.name)
            # Switch output[0] feeds the false branch, output[1] the true branch
            false_switch_id = self.g.make_node(
                "Identity",
                [switch.input[0]],
                outputs=[switch.output[0]],
                op_name_scope=cond_context.cond_scope,
                shapes=[shapes[0]],
                dtypes=[dtypes[0]],
            )
            cond_context.false_branch_context.nodes.add(false_switch_id)
            true_switch_id = self.g.make_node(
                "Identity",
                [switch.input[0]],
                outputs=[switch.output[1]],
                op_name_scope=cond_context.cond_scope,
                shapes=[shapes[1]],
                dtypes=[dtypes[1]],
            )
            cond_context.true_branch_context.nodes.add(true_switch_id)
            nodes_to_add.extend([false_switch_id, true_switch_id])
        # replace merge with if node
        logger.debug("cut off merge connection")
        for merge in cond_context.merges:
            self.g.remove_node(merge.name)

    def _is_merge(self, node):
        return node.type == "Merge"

    def _is_switch(self, node):
        return node.type == "Switch"

    def _parse_cond(self, name_scope, merge_nodes):
        """Parse condition subgraph for these merge nodes"""
        true_branch_context, false_branch_context, switchs = self._trace_back(name_scope, merge_nodes)
        # find pred output from any switch
        pred_input = list(switchs)[0].input[1]
        return pred_input, true_branch_context, false_branch_context, switchs

    def _trace_back(self, name_scope, merge_nodes):
        """
        Trace back to the switch from merge nodes and collect the nodes
        in the true/false branchs of tf.cond respectively, some comments:
        1. According to tf.cond implementation, We make the hypothesis
        that one tf.cond cannot comprise successive Switch nodes.
        2. Thank to construct_graph_from_nodes, in which Identity node
        will be added to each output of subgraph, we needn't deal with the
        branch with only one const node specially.

        TODO: This implement doesn't depend on control inputs. For a price,
        in the case that true and false branch both only contain a
        const node, we will throw a Exception.
        """
        logger.debug("trace back from [%s]", ",".join(n.name for n in merge_nodes))
        true_branch_context = CondBranchContext()
        false_branch_context = CondBranchContext()
        total_switchs = set()
        for merge_node in merge_nodes:
            true_nodes, true_output, false_nodes, false_output, switchs = \
                self._trace_back_from_one_merge(merge_node)
            true_branch_context.nodes |= set(true_nodes)
            true_branch_context.output.append(true_output)
            false_branch_context.nodes |= set(false_nodes)
            false_branch_context.output.append(false_output)
            total_switchs |= switchs
        return true_branch_context, false_branch_context, total_switchs

    def _trace_back_from_one_merge(self, merge_node):
        """Parse the ingredients (nodes and outputs)of true and false branch"""
        logger.debug("trace back from %s", merge_node.name)
        merge_input_1 = merge_node.input[0]
        merge_input_2 = merge_node.input[1]
        switchs = set()

        def stop_at_switch(node):
            # collect switches encountered and stop the traversal there
            if self._is_switch(node):
                switchs.add(node)
                return False
            return True

        branch_nodes_1 = self.g.extract_sub_graph_nodes(
            [merge_input_1],
            stop_at_switch
        )
        branch_nodes_2 = self.g.extract_sub_graph_nodes(
            [merge_input_2],
            stop_at_switch
        )
        branch_type_1 = self._branch_type(merge_input_1, branch_nodes_1)
        branch_type_2 = self._branch_type(merge_input_2, branch_nodes_2)
        # all possible branch types: UU, UT, UF, TU, TF, FU, FT
        if branch_type_1 == BranchType.UNKNOWN and branch_type_2 == BranchType.UNKNOWN:
            raise ValueError("Cannot handle the case both true and false branchs only \
                contain const nodes for now.")
        if branch_type_1 == branch_type_2:
            raise ValueError("true graph and false graph are intersected")
        if branch_type_1 == BranchType.TRUE or branch_type_2 == BranchType.FALSE:
            true_branch_nodes, true_output = branch_nodes_1, merge_input_1
            false_branch_nodes, false_output = branch_nodes_2, merge_input_2
        else:
            true_branch_nodes, true_output = branch_nodes_2, merge_input_2
            false_branch_nodes, false_output = branch_nodes_1, merge_input_1
        return true_branch_nodes, true_output, false_branch_nodes, false_output, switchs

    def _branch_type(self, branch_output, nodes):
        """Infer the branch type (true, false or unknown)"""
        branch = BranchType.UNKNOWN
        # the branch is empty
        if not nodes:
            input_node = self.g.get_node_by_output(branch_output)
            if self._is_switch(input_node):
                # Switch output[0] is the false-branch output, output[1] the true one
                if branch_output == input_node.output[0]:
                    branch = BranchType.FALSE
                else:
                    branch = BranchType.TRUE
            return branch
        for node in nodes:
            for inp in node.input:
                input_node = self.g.get_node_by_output(inp)
                if self._is_switch(input_node):
                    if inp == input_node.output[0]:
                        if branch == BranchType.TRUE:
                            raise ValueError("true and false graph intersect at {}".format(node.name))
                        branch = BranchType.FALSE
                    else:
                        if branch == BranchType.FALSE:
                            raise ValueError("true and false graph intersect at {}".format(node.name))
                        branch = BranchType.TRUE
        if branch == BranchType.UNKNOWN:
            logger.debug(
                "branch only contains const node: [%s]",
                ",".join(n.name for n in nodes)
            )
        return branch
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
def rewrite_cond(g, ops):
    """Module-level entry point used by the rewriter pipeline."""
    rewriter = CondRewriter(g)
    return rewriter.rewrite()
|
lib/python3.10/site-packages/tf2onnx/rewriter/conv2d_with_add_rewriter.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter - rewrite tensorflow subgraph to onnx conv2d op with BiasAdd
|
| 6 |
+
"""
|
| 7 |
+
from tf2onnx import logging
|
| 8 |
+
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
|
| 9 |
+
|
| 10 |
+
logger = logging.getLogger(__name__)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# pylint: disable=missing-docstring
|
| 14 |
+
|
| 15 |
+
def rewrite_biasadd_with_conv2d(g, ops):
    """Fold BiasAdd(Conv2D(x, w), b) into a single 3-input conv node.

    Matches Conv2D/Conv2DBackpropInput followed by BiasAdd, then rebuilds one
    conv node that takes the bias as its third input and reuses the BiasAdd's
    name and outputs so downstream consumers keep working.
    """
    pattern = \
        OpTypePattern('BiasAdd', name='biasadd', inputs=[
            OpTypePattern('Conv2D|Conv2DBackpropInput', name='conv', inputs=['*', '*']), '*'])
    matcher = GraphMatcher(pattern)
    for match in list(matcher.match_ops(ops)):
        biasadd = match.get_op('biasadd')
        conv = match.get_op('conv')

        # snapshot everything needed from both nodes before removing them
        conv_type = conv.type
        conv_input = conv.input
        conv_attr = conv.attr
        dtype = g.get_dtype(conv.output[0])
        shape = g.get_shape(conv.output[0])
        # the fused node takes over the BiasAdd's identity and outputs
        conv_name = biasadd.name
        conv_output = biasadd.output
        conv_inputs = [conv_input[0], conv_input[1], biasadd.input[1]]

        # drop the original pair, then emit the fused conv in their place
        g.remove_node(conv.name)
        g.remove_node(biasadd.name)

        g.make_node(conv_type, conv_inputs, attr=conv_attr, name=conv_name, outputs=conv_output,
                    shapes=[shape], dtypes=[dtype], skip_conversion=False)
    return ops
|
lib/python3.10/site-packages/tf2onnx/rewriter/conv2d_with_pad_rewriter.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter - rewrite tensorflow subgraph to onnx condv2 op with pad
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
|
| 10 |
+
from tf2onnx import handler, logging
|
| 11 |
+
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
|
| 12 |
+
|
| 13 |
+
logger = logging.getLogger(__name__)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# pylint: disable=missing-docstring
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def rewrite_conv2d_with_pad(g, ops):
    """Merge a constant, zero-mode Pad feeding a Conv2D into the conv's pads attr.

    Only applies when the padding values are constant, the pad mode is
    constant/default with no explicit pad value input, the conv uses VALID-style
    padding (not "SAME"), and no padding touches the batch or channel axes.
    """
    pattern = \
        OpTypePattern("Conv2D", name="conv", inputs=[
            OpTypePattern("Pad", name="pad"),
            OpTypePattern("*")
        ])
    matcher = GraphMatcher(pattern)
    for match in list(matcher.match_ops(ops)):
        conv = match.get_op("conv")
        pad = match.get_op("pad")
        paddings = pad.inputs[1]

        # padding amounts must be known at conversion time
        if not paddings.is_const():
            continue
        mode = pad.get_attr("mode")
        if mode:
            mode = mode.s.decode("utf-8").lower()
        # a third Pad input would be an explicit constant_values tensor
        if mode not in [None, "constant"] or len(pad.input) >= 3:
            continue
        # Conv2D already has a pad
        if conv.get_attr("padding").s.decode("utf-8") == "SAME":
            continue

        logger.debug("merge pad [%s] into conv [%s]", pad.name, conv.name)
        paddings_val = np.array(paddings.get_tensor_value())
        # can't pad on batch or channel dimensions
        data_format = conv.get_attr("data_format").s.decode("utf-8")
        if data_format == "NHWC":
            if np.any(paddings_val[0]) or np.any(paddings_val[3]):
                continue
            paddings_val = paddings_val[1:3]
        else:
            if np.any(paddings_val[0]) or np.any(paddings_val[1]):
                continue
            paddings_val = paddings_val[2:4]

        # reorder [[h0,h1],[w0,w1]] into ONNX pads layout [h0,w0,h1,w1]
        paddings_val = paddings_val.transpose().flatten()
        # bypass the Pad node: conv now reads the Pad's input directly
        g.replace_input(conv, conv.input[0], pad.input[0], 0)
        # convert Conv2D
        conv.type = "Conv2D"
        func, _ = handler.tf_op.find_effective_op("Conv2D")
        func(g, conv)
        conv.skip_conversion = True
        conv.set_attr("auto_pad", "NOTSET")
        conv.set_attr("pads", paddings_val)
    return ops
|
lib/python3.10/site-packages/tf2onnx/rewriter/custom_rnn_rewriter.py
ADDED
|
@@ -0,0 +1,228 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter.custom_rnn_rewriter - custom rnn support
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
|
| 11 |
+
import logging
|
| 12 |
+
import sys
|
| 13 |
+
import traceback
|
| 14 |
+
|
| 15 |
+
from onnx import onnx_pb
|
| 16 |
+
import numpy as np
|
| 17 |
+
|
| 18 |
+
from tf2onnx.graph_builder import GraphBuilder
|
| 19 |
+
from tf2onnx.rewriter.loop_rewriter_base import LoopRewriterBase, Context
|
| 20 |
+
from tf2onnx.rewriter.rnn_utils import REWRITER_RESULT, get_rnn_scope_name, parse_rnn_loop
|
| 21 |
+
from tf2onnx import utils
|
| 22 |
+
|
| 23 |
+
logger = logging.getLogger(__name__)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,broad-except,protected-access
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class CustomRnnContext(Context):
    """Loop-rewriter context extended with RNN-specific fields."""

    def __init__(self):
        super(CustomRnnContext, self).__init__()
        # name scope of the rnn() call wrapping the while loop
        self.rnn_scope = None
        # loop variable tracking the time step
        self.time_var = None
        # loop variable tracking the iteration count
        self.iteration_var = None
|
| 36 |
+
|
| 37 |
+
class CustomRnnRewriter(LoopRewriterBase):
|
| 38 |
+
def create_context(self):
    """Return a fresh RNN-specific context for each candidate loop."""
    return CustomRnnContext()
|
| 40 |
+
|
| 41 |
+
def run(self):
|
| 42 |
+
logger.debug("enter custom rnn rewriter")
|
| 43 |
+
return self.run_internal()
|
| 44 |
+
|
| 45 |
+
def need_rewrite(self, context):
|
| 46 |
+
context.rnn_scope = get_rnn_scope_name(context.while_context_scope)
|
| 47 |
+
|
| 48 |
+
res = parse_rnn_loop(self.g, context.loop_properties, context.rnn_scope,
|
| 49 |
+
context.while_context_scope)
|
| 50 |
+
if not res:
|
| 51 |
+
logger.debug("skip the loop due to parse_rnn_loop failed")
|
| 52 |
+
return False
|
| 53 |
+
|
| 54 |
+
time_var, iteration_var = res
|
| 55 |
+
context.time_var = time_var
|
| 56 |
+
context.iteration_var = iteration_var
|
| 57 |
+
logger.debug("time var %s - enter input id (%s) shape: %s, output (%s) shape: %s", time_var.enter_name,
|
| 58 |
+
time_var.enter_input_id, self.g.get_shape(time_var.enter_input_id),
|
| 59 |
+
time_var.switch_true_identity_output.id, time_var.switch_true_identity_output.shape)
|
| 60 |
+
|
| 61 |
+
return True
|
| 62 |
+
|
| 63 |
+
def rewrite(self, context):
|
| 64 |
+
logger.debug("enter rewrite function")
|
| 65 |
+
try:
|
| 66 |
+
scan_props = context.loop_properties
|
| 67 |
+
|
| 68 |
+
state_inputs_initial_values = []
|
| 69 |
+
for state_input in scan_props.state_inputs_initial_values:
|
| 70 |
+
if self.g.opset == 8:
|
| 71 |
+
nodes = self._adapt_scan_sequence_input_or_output("input", state_input, False)
|
| 72 |
+
state_inputs_initial_values.append(nodes[-1].output[0])
|
| 73 |
+
else: # since opset 9
|
| 74 |
+
state_inputs_initial_values.append(state_input)
|
| 75 |
+
|
| 76 |
+
scan_inputs_initial_values = []
|
| 77 |
+
for scan_input in scan_props.scan_inputs_initial_values:
|
| 78 |
+
if self.g.opset == 8:
|
| 79 |
+
nodes = self._adapt_scan_sequence_input_or_output("input", scan_input, False)
|
| 80 |
+
scan_inputs_initial_values.append(nodes[-1].output[0])
|
| 81 |
+
else: # since opset 9
|
| 82 |
+
scan_inputs_initial_values.append(scan_input)
|
| 83 |
+
|
| 84 |
+
cell_g_info = context.cell_graph
|
| 85 |
+
scan_body_g = LoopRewriterBase.construct_graph_from_nodes(self.g, cell_g_info.nodes, cell_g_info.outputs)
|
| 86 |
+
for input_tensor_info in scan_props.state_inputs:
|
| 87 |
+
scan_body_g.add_graph_input(input_tensor_info.id, input_tensor_info.dtype, input_tensor_info.shape)
|
| 88 |
+
|
| 89 |
+
for input_tensor_info in scan_props.scan_inputs:
|
| 90 |
+
scan_body_g.add_graph_input(input_tensor_info.id, input_tensor_info.dtype, input_tensor_info.shape)
|
| 91 |
+
|
| 92 |
+
branches = {"body": scan_body_g}
|
| 93 |
+
scan_node = self._create_scan_node(context, scan_props,
|
| 94 |
+
state_inputs_initial_values + scan_inputs_initial_values,
|
| 95 |
+
branches=branches)
|
| 96 |
+
if not scan_node:
|
| 97 |
+
logger.error("failed to create scan node during rewrite")
|
| 98 |
+
return REWRITER_RESULT.FAIL
|
| 99 |
+
|
| 100 |
+
self._connect_scan_with_output(context, scan_node)
|
| 101 |
+
|
| 102 |
+
return REWRITER_RESULT.OK
|
| 103 |
+
|
| 104 |
+
except Exception as ex:
|
| 105 |
+
tb = traceback.format_exc()
|
| 106 |
+
logger.error("custom rnn rewrite failed, due to exception: %s, details:%s", ex, tb)
|
| 107 |
+
return REWRITER_RESULT.FAIL
|
| 108 |
+
|
| 109 |
+
def _create_scan_node(self, context, scan_props, init_values, branches=None):
|
| 110 |
+
logger.debug("create scan node")
|
| 111 |
+
# reuse original output connection id (e.g. Exit_XXX), so we don't need set shape.
|
| 112 |
+
loop_outputs_shapes = []
|
| 113 |
+
loop_outputs_dtypes = []
|
| 114 |
+
for tensor_value_info in scan_props.state_outputs_exits + scan_props.scan_outputs_exits:
|
| 115 |
+
if tensor_value_info.id:
|
| 116 |
+
# in opset 8, the first dim of scan output must be batch
|
| 117 |
+
if self.g.opset == 8:
|
| 118 |
+
loop_outputs_shapes.append([1] + tensor_value_info.shape)
|
| 119 |
+
else:
|
| 120 |
+
loop_outputs_shapes.append(tensor_value_info.shape)
|
| 121 |
+
loop_outputs_dtypes.append(tensor_value_info.dtype)
|
| 122 |
+
n = self.g.get_node_by_output(tensor_value_info.id)
|
| 123 |
+
self.g.remove_node(n.name)
|
| 124 |
+
else:
|
| 125 |
+
loop_outputs_shapes.append([-1])
|
| 126 |
+
loop_outputs_dtypes.append(None)
|
| 127 |
+
|
| 128 |
+
if self.g.opset == 8:
|
| 129 |
+
# here we did not give the sequence_length, because
|
| 130 |
+
# current batch size is 1, not original batch size
|
| 131 |
+
# original seq_length will be used by the loop body of Scan op.
|
| 132 |
+
scan_node = self.g.make_node("Scan", [""] + init_values, op_name_scope="custom_rnn_scan",
|
| 133 |
+
attr={"num_scan_inputs": len(scan_props.scan_inputs)},
|
| 134 |
+
output_count=len(scan_props.state_outputs + scan_props.scan_outputs),
|
| 135 |
+
shapes=loop_outputs_shapes, dtypes=loop_outputs_dtypes,
|
| 136 |
+
skip_conversion=False, branches=branches)
|
| 137 |
+
else:
|
| 138 |
+
scan_node = self.g.make_node("Scan", init_values, op_name_scope="custom_rnn_scan",
|
| 139 |
+
attr={"num_scan_inputs": len(scan_props.scan_inputs)},
|
| 140 |
+
output_count=len(scan_props.state_outputs + scan_props.scan_outputs),
|
| 141 |
+
shapes=loop_outputs_shapes, dtypes=loop_outputs_dtypes,
|
| 142 |
+
skip_conversion=False, branches=branches)
|
| 143 |
+
|
| 144 |
+
return scan_node
|
| 145 |
+
|
| 146 |
+
def _connect_scan_with_output(self, context, scan_node):
|
| 147 |
+
logger.debug("connect scan output with the graph")
|
| 148 |
+
|
| 149 |
+
index = 0
|
| 150 |
+
for out_tensor_value_info in context.loop_properties.state_outputs_exits:
|
| 151 |
+
if out_tensor_value_info.id:
|
| 152 |
+
if self.g.opset == 8:
|
| 153 |
+
nodes = self._adapt_scan_sequence_input_or_output("state_output_reshape",
|
| 154 |
+
scan_node.output[index], True)
|
| 155 |
+
self.g.replace_all_inputs(
|
| 156 |
+
out_tensor_value_info.id, nodes[-1].output[0]) # ops=self.g.get_nodes()
|
| 157 |
+
else: # since opset 9
|
| 158 |
+
self.g.replace_all_inputs(
|
| 159 |
+
out_tensor_value_info.id, scan_node.output[index]) # ops=self.g.get_nodes()
|
| 160 |
+
index += 1
|
| 161 |
+
|
| 162 |
+
for out_tensor_value_info in context.loop_properties.scan_outputs_exits:
|
| 163 |
+
if out_tensor_value_info.id:
|
| 164 |
+
if self.g.opset == 8:
|
| 165 |
+
nodes = self._adapt_scan_sequence_input_or_output("scan_output_reshape",
|
| 166 |
+
scan_node.output[index], True)
|
| 167 |
+
self.g.replace_all_inputs(
|
| 168 |
+
out_tensor_value_info.id, nodes[-1].output[0]) # ops=self.g.get_nodes()
|
| 169 |
+
else: # since opset 9
|
| 170 |
+
self.g.replace_all_inputs(
|
| 171 |
+
out_tensor_value_info.id, scan_node.output[index]) # ops=self.g.get_nodes()
|
| 172 |
+
index += 1
|
| 173 |
+
|
| 174 |
+
def _adapt_scan_sequence_input_or_output(self, target_name, input_id, handle_output=False):
|
| 175 |
+
nodes_to_add = []
|
| 176 |
+
shape_node = self.g.make_node("Shape", [input_id])
|
| 177 |
+
nodes_to_add.append(shape_node)
|
| 178 |
+
inferred_shape = self.g.get_shape(input_id)
|
| 179 |
+
if handle_output is True:
|
| 180 |
+
# handle output:
|
| 181 |
+
# if required dim values don't contain more than one -1,
|
| 182 |
+
# just use a const for Reshape's shape input.
|
| 183 |
+
if inferred_shape is not None and inferred_shape[1:].count(-1) <= 1:
|
| 184 |
+
new_shape_node = self.g.make_const(utils.make_name(target_name + "_target_shape"),
|
| 185 |
+
np.array(inferred_shape[1:], dtype=np.int64))
|
| 186 |
+
nodes_to_add.append(new_shape_node)
|
| 187 |
+
else:
|
| 188 |
+
# otherwise, get the dim dynamically, e.g. remove the fake batch size (e.g.1)
|
| 189 |
+
# from [1, time, real-batch, ...]
|
| 190 |
+
origin_shape_node = self.g.make_node("Cast", [shape_node.output[0]],
|
| 191 |
+
{"to": onnx_pb.TensorProto.FLOAT})
|
| 192 |
+
nodes_to_add.append(origin_shape_node)
|
| 193 |
+
|
| 194 |
+
attr = {"axes": [0], "starts": [1], "ends": [sys.maxsize]}
|
| 195 |
+
inputs_map = {"data": origin_shape_node.output[0], **attr}
|
| 196 |
+
sliced_shape_node = GraphBuilder(self.g).make_slice(inputs_map)
|
| 197 |
+
nodes_to_add.append(self.g.get_node_by_output(sliced_shape_node))
|
| 198 |
+
|
| 199 |
+
new_shape_node = self.g.make_node("Cast", [sliced_shape_node],
|
| 200 |
+
{"to": onnx_pb.TensorProto.INT64})
|
| 201 |
+
nodes_to_add.append(new_shape_node)
|
| 202 |
+
|
| 203 |
+
new_shape = inferred_shape[1:]
|
| 204 |
+
else:
|
| 205 |
+
# handle input:
|
| 206 |
+
if inferred_shape is not None and inferred_shape.count(-1) <= 1:
|
| 207 |
+
new_shape_node = self.g.make_const(utils.make_name(target_name + "_target_shape"),
|
| 208 |
+
np.array([1] + inferred_shape, dtype=np.int64))
|
| 209 |
+
nodes_to_add.append(new_shape_node)
|
| 210 |
+
else:
|
| 211 |
+
# add a fake batch size : 1
|
| 212 |
+
fake_batch_size_node = self.g.make_const(utils.make_name(target_name + "_target_shape"),
|
| 213 |
+
np.array([1], dtype=np.int64))
|
| 214 |
+
nodes_to_add.append(fake_batch_size_node)
|
| 215 |
+
new_shape_node = self.g.make_node("Concat",
|
| 216 |
+
[fake_batch_size_node.output[0], shape_node.output[0]],
|
| 217 |
+
attr={"axis": 0})
|
| 218 |
+
nodes_to_add.append(new_shape_node)
|
| 219 |
+
new_shape = [1] + inferred_shape
|
| 220 |
+
|
| 221 |
+
reshape_node = self.g.make_node("Reshape", [input_id, new_shape_node.output[0]],
|
| 222 |
+
shapes=[new_shape],
|
| 223 |
+
dtypes=[self.g.get_dtype(input_id)],
|
| 224 |
+
op_name_scope=target_name)
|
| 225 |
+
nodes_to_add.append(reshape_node)
|
| 226 |
+
logger.debug("create Reshape for scan output %s, with output shape %s",
|
| 227 |
+
reshape_node.output[0], new_shape)
|
| 228 |
+
return nodes_to_add
|
lib/python3.10/site-packages/tf2onnx/rewriter/dropout_rewriter.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter - rewrite tensorflow subgraph to onnx dropout op
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
from tf2onnx import utils
|
| 10 |
+
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
|
| 11 |
+
from tf2onnx import logging
|
| 12 |
+
|
| 13 |
+
logger = logging.getLogger(__name__)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# pylint: disable=missing-docstring
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def rewrite_dropout(g, ops):
    """Replace TF dropout subgraphs in-place with a single ONNX Dropout node.

    Three pattern variants are matched: the classic divide/floor form, the
    mul/cast(GreaterEqual) form, and the tf-2.0 tf.nn.dropout() form.
    Returns the (possibly modified) ops list.
    """
    patterns = [
        OpTypePattern('Mul', name='outputs', inputs=[
            OpTypePattern('RealDiv', name="input2"),
            OpTypePattern('Floor', inputs=[
                OpTypePattern('Add', inputs=[
                    OpTypePattern("*", name="input3"),
                    OpTypePattern('RandomUniform|RandomUniformLike'),
                ])
            ]),
        ]),
        OpTypePattern("Mul", name="outputs", inputs=[
            OpTypePattern("Mul", name="input2"),
            OpTypePattern("Cast", inputs=[
                OpTypePattern("GreaterEqual", inputs=[
                    OpTypePattern("RandomUniform|RandomUniformLike"),
                    OpTypePattern("*", name="input3")
                ])
            ])
        ]),
        # pattern for tf-2.0 tf.nn.dropout()
        OpTypePattern("Mul", name="outputs", inputs=[
            OpTypePattern("Cast", inputs=[
                OpTypePattern("GreaterEqual", inputs=[
                    OpTypePattern("RandomUniform|RandomUniformLike"),
                    OpTypePattern("*", name="input3")
                ])
            ]),
            OpTypePattern("Mul", name="input2"),
        ]),
    ]
    for pattern in patterns:
        matcher = GraphMatcher(pattern, allow_reorder=True)
        match_results = list(matcher.match_ops(ops))
        for match in match_results:
            # input2: the scaling Mul/RealDiv; input3: the ratio/threshold const.
            input2 = match.get_op('input2')
            input3 = match.get_op('input3')
            outputs = match.get_op('outputs')

            if not input3.is_scalar():
                logger.warning("Dropout pattern rooted at %s does not have a "
                               "constant ratio and cannot be replaced.", outputs.name)
                continue
            ratio = input3.get_tensor_value()

            # Whichever side of input2 is scalar is the scaling constant; the
            # other side is the actual data tensor being dropped out.
            if input2.inputs[0].is_scalar():
                data = input2.inputs[1]
                scaling_constant = input2.inputs[0].get_tensor_value()
            elif input2.inputs[1].is_scalar():
                data = input2.inputs[0]
                scaling_constant = input2.inputs[1].get_tensor_value()
            else:
                logger.warning("Could not find scaling constant for dropout pattern rooted at %s. "
                               "The pattern will not be replaced with an ONNX dropout node.", outputs.name)
                continue

            # The scaling constant should be 1/(1-ratio), otherwise this isn't truly a dropout node.
            # NOTE(review): for the RealDiv variant the "ratio" matched here is
            # the keep-probability threshold — the allclose check is what
            # rejects non-dropout lookalikes; verify against the matcher.
            if not np.allclose([1], [scaling_constant * (1 - ratio)]):
                logger.warning("Scaling constant %f for dropout pattern rooted at %s is inconsistent with dropout "
                               "ratio %f. The pattern will not be replaced with an ONNX dropout node.",
                               scaling_constant, outputs.name, ratio)
                continue

            # Keep input3 (the ratio const) — it may be shared elsewhere.
            nodes_to_remove = [n for n in match.get_nodes() if n.name != input3.name]
            if not g.is_safe_to_remove_nodes(nodes_to_remove, [outputs.output[0]]):
                logger.warning("Nodes in dropout pattern rooted at %s cannot be removed because intermediate results "
                               "of some nodes are referenced elsewhere in graph.", outputs.name)
                continue

            op_name = utils.make_name("Dropout")
            out_name = utils.port_name(op_name)
            new_node = g.make_node(
                "Dropout",
                inputs=[data.output[0]],
                outputs=[out_name],
                name=op_name,
                attr={"ratio": ratio},
                shapes=[g.get_shape(data.output[0])],
                dtypes=[g.get_dtype(data.output[0])]
            )
            g.replace_all_inputs(outputs.output[0], new_node.output[0], ops=ops)
            for n in nodes_to_remove:
                g.remove_node(n.name)

    return ops
|
lib/python3.10/site-packages/tf2onnx/rewriter/eye_rewriter.py
ADDED
|
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter.eye_rewriter - supports tf.eye
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from onnx import onnx_pb
|
| 9 |
+
from tf2onnx.graph_builder import GraphBuilder
|
| 10 |
+
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
|
| 11 |
+
|
| 12 |
+
# pylint: disable=invalid-name,unused-argument,missing-docstring, unused-variable
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def rewrite_eye(g, ops):
    """Rewrite tf.eye subgraphs (MatrixDiag/MatrixSetDiag based) into ONNX EyeLike.

    Returns g.get_nodes() whether or not any rewrite happened.
    """
    # schema of eye is eye(num_rows, num_columns=None), if num_columns not specified then it's equal to num_rows
    # tf.eye is implemented by a sub_graph which contains op "MatrixDiag" or "MatrixSetDiag" while
    # these two ops are un-supported directly in onnx
    # but onnx op EyeLike can be used to map the sub_graph
    # "rewrite_eye" supports tf.eye(non_const) and tf.eye(non_const1, non_const2).
    # tf.eye(const) and tf.eye(const1, const2) are not supported in this rewriter

    # ConstantOfShape in opset 9 is used, so if opset less than 9 then do nothing
    if g.opset < 9:
        return g.get_nodes()

    # Patterns 1-8 cover the sub-graph layouts different TF versions emit;
    # each binds the diag op as "output_eye_matrix", the ones-constant as
    # "fill_value", and the min(num_rows, num_cols) node as "min_or_cast".
    pattern1 = \
        OpTypePattern("MatrixDiag", name="output_eye_matrix", inputs=[
            OpTypePattern("Fill", inputs=[
                OpTypePattern("Const", name="fill_value"),
                OpTypePattern("ConcatV2", inputs=[
                    "*",
                    "*",
                    OpTypePattern("Pack", inputs=[
                        OpTypePattern("Minimum|Cast", name="min_or_cast")
                    ])
                ])
            ])
        ])
    pattern2 = \
        OpTypePattern("MatrixSetDiag", name="output_eye_matrix", inputs=[
            OpTypePattern("Fill"),
            OpTypePattern("Fill", inputs=[
                OpTypePattern("Const", name="fill_value"),
                OpTypePattern("ConcatV2", inputs=[
                    "*",
                    "*",
                    OpTypePattern("Pack", inputs=[
                        OpTypePattern("Minimum|Cast", name="min_or_cast")
                    ])
                ])
            ])
        ])
    pattern3 = \
        OpTypePattern("MatrixDiag", name="output_eye_matrix", inputs=[
            OpTypePattern("Fill", inputs=[
                OpTypePattern("ConcatV2", inputs=[
                    "*",
                    OpTypePattern("ExpandDims", inputs=[
                        OpTypePattern("Minimum|Cast", name="min_or_cast"),
                        "*"
                    ]),
                    "*",
                ]),
                OpTypePattern("Const", name="fill_value"),
            ])
        ])
    pattern4 = \
        OpTypePattern("MatrixSetDiag", name="output_eye_matrix", inputs=[
            OpTypePattern("Fill"),
            OpTypePattern("Fill", inputs=[
                OpTypePattern("ConcatV2", inputs=[
                    "*",
                    OpTypePattern("ExpandDims", inputs=[
                        OpTypePattern("Minimum|Cast", name="min_or_cast"),
                        "*"
                    ]),
                    "*",
                ]),
                OpTypePattern("Const", name="fill_value"),
            ]),
        ])
    pattern5 = \
        OpTypePattern("MatrixDiagV3", name="output_eye_matrix", inputs=[
            OpTypePattern("Fill", inputs=[
                OpTypePattern("ConcatV2", inputs=[
                    "*",
                    OpTypePattern("ExpandDims", inputs=[
                        OpTypePattern("Minimum|Cast", name="min_or_cast"),
                        "*"
                    ]),
                    "*",
                ]),
                OpTypePattern("Const", name="fill_value"),
            ]),
            "*", "*", "*", "*",
        ])
    pattern6 = \
        OpTypePattern("MatrixSetDiagV3", name="output_eye_matrix", inputs=[
            OpTypePattern("Fill"),
            OpTypePattern("Fill", inputs=[
                OpTypePattern("ConcatV2", inputs=[
                    "*",
                    OpTypePattern("ExpandDims", inputs=[
                        OpTypePattern("Minimum|Cast", name="min_or_cast"),
                        "*"
                    ]),
                    "*",
                ]),
                OpTypePattern("Const", name="fill_value"),
            ]), "*"
        ])
    pattern7 = \
        OpTypePattern("MatrixDiag", name="output_eye_matrix", inputs=[
            OpTypePattern("Fill", inputs=[
                OpTypePattern("Reshape", inputs=[
                    OpTypePattern("Minimum|Cast", name="min_or_cast"),
                    "*",
                ]),
                OpTypePattern("Const", name="fill_value"),
            ])
        ])
    pattern8 = \
        OpTypePattern("MatrixSetDiag", name="output_eye_matrix", inputs=[
            OpTypePattern("Fill"),
            OpTypePattern("Fill", inputs=[
                OpTypePattern("Reshape", inputs=[
                    OpTypePattern("Minimum|Cast", name="min_or_cast"),
                    "*",
                ]),
                OpTypePattern("Const", name="fill_value"),
            ])
        ])

    for pattern in [pattern1, pattern2, pattern3, pattern4, pattern5, pattern6, pattern7, pattern8]:
        matcher = GraphMatcher(pattern, allow_reorder=True)
        match_results = list(matcher.match_ops(ops))
        for match_result in match_results:
            # The diagonal must be filled with ones or this is not an identity.
            if match_result.get_op("fill_value").get_tensor_value() != 1:
                continue

            # Locate the Minimum(num_rows, num_columns) node, possibly behind a Cast.
            min_or_cast = match_result.get_op("min_or_cast")
            if min_or_cast.type == "Minimum":
                min_node = min_or_cast
            elif min_or_cast.type == "Cast" and min_or_cast.inputs[0].type == "Minimum":
                min_node = min_or_cast.inputs[0]
            else:
                continue

            num_rows = min_node.inputs[0]
            num_columns = min_node.inputs[1]

            old_output = match_result.get_op("output_eye_matrix")
            output_dtypes = [g.get_dtype(old_output.output[0])]
            output_shapes = [g.get_shape(old_output.output[0])]
            g.remove_node(old_output.name)

            # onnx op "EyeLike" need a 2D tensor, so generate it

            num_rows = GraphBuilder(g).make_unsqueeze(
                {"axes": [0], "data": num_rows.output[0]}, return_node=True)
            num_columns = GraphBuilder(g).make_unsqueeze(
                {"axes": [0], "data": num_columns.output[0]}, return_node=True)
            matrix_shape = g.make_node("Concat", [num_rows.output[0], num_columns.output[0]], attr={"axis": 0})
            # cast nodes added for "ConstantOfShape" in ONNX only accepts int64 data.
            matrix_shape_int64 = g.make_node("Cast", matrix_shape.output, attr={"to": onnx_pb.TensorProto.INT64})
            zero_matrix = g.make_node("ConstantOfShape", matrix_shape_int64.output)

            # Reuse the old node's name/outputs so downstream consumers stay wired.
            g.make_node("EyeLike", zero_matrix.output, attr={"dtype": output_dtypes[0]},
                        name=old_output.name, shapes=output_shapes, dtypes=output_dtypes, outputs=old_output.output)

    return g.get_nodes()
|
lib/python3.10/site-packages/tf2onnx/rewriter/flatten_rewriter.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter - rewrite tensorflow subgraph to onnx flatten op
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
|
| 10 |
+
from tf2onnx import utils
|
| 11 |
+
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# pylint: disable=missing-docstring
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def rewrite_flatten(g, ops):
    """Rewrite TF reshape-based flatten subgraphs into an ONNX Flatten op.

    Two variants are matched: one where StridedSlice reads a constant shape
    (which must equal the statically-known input shape), and one where it
    slices a dynamic Shape op. Returns the (possibly modified) ops list.

    Fixes vs. original: the loop variable no longer shadows the list it
    iterates, and the dead ``new_dim = None`` initialization is removed.
    """
    pattern_fixed_shape_input = \
        OpTypePattern('Reshape', name='reshape', inputs=[
            OpTypePattern("*", name="input"),
            OpTypePattern('Pack', name="pack", inputs=[
                OpTypePattern('StridedSlice', name="slice", inputs=[
                    "*", "*", "*", "*",
                ]),
                "*",
            ]),
        ])
    pattern_non_fixed_shape_input = \
        OpTypePattern('Reshape', name='reshape', inputs=[
            OpTypePattern("*", name="input"),
            OpTypePattern('Pack', name="pack", inputs=[
                OpTypePattern('StridedSlice', name="slice", inputs=[
                    OpTypePattern('Shape', inputs=[
                        OpTypePattern("*", name="input2")
                    ]),
                    "*", "*", "*",
                ]),
                "*",
            ]),
        ])
    matcher = GraphMatcher(pattern_fixed_shape_input)
    match_results_1 = list(matcher.match_ops(ops))

    matcher = GraphMatcher(pattern_non_fixed_shape_input)
    match_results_2 = list(matcher.match_ops(ops))

    # Pair each match list with whether the slice's const input must be checked
    # against the statically-known input shape.
    grouped_results = [(match_results_1, True), (match_results_2, False)]
    for match_results, check_fixed_input_shape in grouped_results:
        for match in match_results:
            input_node = match.get_op('input')
            reshape_node = match.get_op('reshape')
            pack_node = match.get_op('pack')
            slice_node = match.get_op('slice')
            # The Pack's second input must be the constant -1 ("flatten the rest").
            if not (pack_node.inputs[1].is_const() and pack_node.inputs[1].get_tensor_value() == -1):
                continue

            input_shape = g.get_shape(reshape_node.input[0])
            if input_shape is None:
                continue

            if check_fixed_input_shape:
                if not (slice_node.inputs[0].is_const() and
                        np.array_equal(list(input_shape), list(slice_node.inputs[0].get_tensor_value()))):
                    continue

            begin = slice_node.inputs[1].get_tensor_value(as_list=False)
            end = slice_node.inputs[2].get_tensor_value(as_list=False)
            strides = slice_node.inputs[3].get_tensor_value(as_list=False)
            # Only slices that keep exactly the leading dimension ([0:1:1]) qualify.
            if not (np.array_equal(begin, [0]) and len(end) == 1 and
                    np.array_equal(strides, [1]) and end[0] - begin[0] == 1):
                continue

            to_remove = [n for n in match.get_nodes() if n != input_node]
            safe = g.safe_to_remove_nodes(to_remove)

            # Ok if reshape_node is not safe. Will make it safe later.
            if len(to_remove) - len(safe) > 1:
                continue

            op_name = utils.make_name("Flatten")
            out_name = utils.port_name(op_name)
            g.make_node("Flatten", [reshape_node.input[0]], outputs=[out_name], name=op_name)

            last_dim = input_shape[-1]
            sec_last_dim = input_shape[-2]
            # Merge the last two dims when both are static, else mark unknown.
            # NOTE(review): this recorded shape only matches ONNX Flatten's 2-D
            # output for rank-3 inputs — confirm against shape inference.
            if last_dim > 0 and sec_last_dim > 0:
                new_dim = last_dim * sec_last_dim
            else:
                new_dim = -1

            g.set_shape(out_name, input_shape[:-2] + [new_dim])
            g.replace_all_inputs(reshape_node.output[0], out_name, ops=ops)
            for n in to_remove:
                g.remove_node(n.name)

    return ops
|
lib/python3.10/site-packages/tf2onnx/rewriter/gemm_rewriter.py
ADDED
|
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewrite - rewrite tensorflow subgraph to onnx gemm op
|
| 6 |
+
"""
|
| 7 |
+
import logging
|
| 8 |
+
from onnx import onnx_pb
|
| 9 |
+
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# pylint: disable=missing-docstring
|
| 13 |
+
|
| 14 |
+
def rewrite_gemm(g, ops):
    """Fuse MatMul + optional alpha/beta scaling + Add/BiasAdd into ONNX Gemm.

    Returns the (possibly modified) ops list; no-op for opset <= 6.
    """
    if g.opset <= 6:
        return ops

    # pattern0: alpha*A*B + beta*C
    pattern0 = \
        OpTypePattern('Add|AddV2', name='add', inputs=[
            OpTypePattern('Mul', name='mul1', inputs=[
                OpTypePattern('Const', name='alpha'),
                OpTypePattern('MatMul', name='matmul')
            ]),
            OpTypePattern('Mul', name='mul2', inputs=[
                OpTypePattern('Const', name='beta'),
                OpTypePattern('*', name='C')
            ])
        ])

    # pattern1: alpha*A*B + C
    pattern1 = \
        OpTypePattern('Add|AddV2', name='add', inputs=[
            OpTypePattern('Mul', name='mul1', inputs=[
                OpTypePattern('MatMul', name='matmul'),
                OpTypePattern('Const', name='alpha')
            ]),
            OpTypePattern('*', name='C'),
        ])

    # pattern2: A*B + beta*C
    pattern2 = \
        OpTypePattern('Add|AddV2', name='add', inputs=[
            OpTypePattern('MatMul', name='matmul'),
            OpTypePattern('Mul', name='mul2', inputs=[
                OpTypePattern('Const', name='beta'),
                OpTypePattern('*', name='C')
            ])
        ])

    # pattern3: A*B + C
    pattern3 = \
        OpTypePattern('Add|AddV2', name='add', inputs=[
            OpTypePattern('MatMul', name='matmul'),
            OpTypePattern('*', name='C'),
        ])

    # pattern4: A*B + c
    pattern4 = \
        OpTypePattern('BiasAdd', name='add', inputs=[
            OpTypePattern('MatMul', name='matmul'),
            OpTypePattern('*', name='C'),
        ])

    pattern_list = [pattern0, pattern1, pattern2, pattern3, pattern4]

    for pattern in pattern_list:
        matcher = GraphMatcher(pattern, allow_reorder=True)
        match_results = list(matcher.match_ops(ops))
        if match_results:
            for match in match_results:
                matmul_node = match.get_op("matmul")

                if g.get_dtype(matmul_node.input[0]) != onnx_pb.TensorProto.FLOAT:
                    logging.warning(u"For now, onnxruntime only support float32 type for Gemm rewriter")
                    continue

                attr, is_valid = get_gemm_attr(match)
                if not is_valid:
                    continue

                add_node = match.get_op('add')
                input_c_node = match.get_op("C")
                a_edge_name = matmul_node.input[0]
                b_edge_name = matmul_node.input[1]
                c_edge_name = input_c_node.output[0]

                # Gemm needs fully-known, rank-2 A/B and a C that is
                # unidirectionally broadcastable to A*B; bail out otherwise.
                a_mul_b_shape = g.get_shape(matmul_node.output[0])
                c_shape = g.get_shape(c_edge_name)
                if c_shape is None: continue
                if a_mul_b_shape is None: continue
                if -1 in c_shape + a_mul_b_shape: continue
                if g.get_rank(a_edge_name) != 2 or g.get_rank(b_edge_name) != 2: continue
                # Trailing dims of C must each be 1 or match A*B (numpy-style broadcast).
                compatible = True
                for i in range(1, len(c_shape) + 1):
                    if c_shape[-i] not in [1, a_mul_b_shape[-i]]:
                        compatible = False
                if not compatible: continue

                gemm = g.make_node("Gemm", inputs=[a_edge_name, b_edge_name, c_edge_name],
                                   attr=attr,
                                   shapes=[g.get_shape(add_node.output[0])],
                                   dtypes=[g.get_dtype(add_node.output[0])], op_name_scope=matmul_node.name)

                ops.append(gemm)
                g.replace_all_inputs(add_node.output[0], gemm.output[0], ops=ops)
                # safe_remove only deletes nodes with no remaining consumers;
                # the Mul scaling nodes are cleaned up by later passes.
                to_delete = [add_node, matmul_node]
                g.safe_remove_nodes(to_delete)
    return ops
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def get_gemm_attr(match):
    """Extract ONNX Gemm attributes from a matched MatMul+Add subgraph.

    Args:
        match: GraphMatcher match result; may contain named ops "alpha" and
            "beta" (scalar constants) and "matmul" (the TF MatMul node).

    Returns:
        Tuple ``(attr, is_valid)`` where ``attr`` maps Gemm attribute names
        (``alpha``, ``beta``, ``transA``, ``transB``) to values, and
        ``is_valid`` is False when the match cannot be converted (non-scalar
        alpha/beta, or missing transpose flags on the matmul node).
    """
    attr = {}
    for arg in ["alpha", "beta"]:
        arg_op = match.get_op(arg)
        if arg_op is not None:
            match_args = arg_op.get_tensor_value()
            if isinstance(match_args, list):
                # alpha/beta must be scalars; unwrap a one-element list,
                # reject anything larger.
                if len(match_args) != 1:
                    return attr, False
                match_args = match_args[0]
            attr[arg] = match_args
    for arg in ["matmul"]:
        arg_op = match.get_op(arg)
        if arg_op is not None:
            match_args = arg_op.attr
            if isinstance(match_args, dict):
                keys = list(match_args.keys())
                # BUGFIX: both transpose flags are read below, so both must be
                # present. The original guard used `and`, which let a match
                # with exactly one flag through and then raised KeyError.
                if 'transpose_a' not in keys or 'transpose_b' not in keys:
                    return attr, False
                match_args_a = match_args['transpose_a'].i
                attr['transA'] = match_args_a
                match_args_b = match_args['transpose_b'].i
                attr['transB'] = match_args_b
    return attr, True
|
lib/python3.10/site-packages/tf2onnx/rewriter/gru_rewriter.py
ADDED
|
@@ -0,0 +1,260 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter.gru_rewriter
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
from __future__ import unicode_literals
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
import numpy as np
|
| 14 |
+
from tf2onnx import utils
|
| 15 |
+
from tf2onnx.graph_builder import GraphBuilder
|
| 16 |
+
from tf2onnx.rewriter.rnn_utils import RNNUnitType, get_weights_from_const_node
|
| 17 |
+
|
| 18 |
+
from tf2onnx.rewriter.unit_rnn_rewriter_base import UnitRnnRewriterBase
|
| 19 |
+
|
| 20 |
+
# pylint: disable=invalid-name,unused-argument,missing-docstring
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
logger = logging.getLogger(__name__)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class GRUUnitRewriter(UnitRnnRewriterBase):
    """Rewrites a TF GRU cell loop (GRUCell / GRUBlockCell / CudnnCompatibleGRUCell)
    into a single ONNX GRU node."""

    def __init__(self, g):
        """Initialize the rewriter over graph *g*.

        ``state_variable_handlers`` maps the single GRU state variable ("state")
        to a (finder, connector) pair used by the base class.
        """
        super(GRUUnitRewriter, self).__init__(g)
        # Set by find_cell() once a cell pattern matches; drives later branches.
        self.gru_cell_type = None
        self.state_variable_handlers = [
            {"state": (self._state_variable_finder, self._connect_gru_state_to_graph)}
        ]

    def run(self):
        """Run the base rewriter pipeline; returns the (possibly rewritten) ops."""
        logger.debug("enter gru rewriter")
        return super(GRUUnitRewriter, self).run()

    def find_cell(self, context):
        """Try each known GRU cell pattern in turn; return the first match or None.

        Side effect: records the matched cell type in ``self.gru_cell_type``.
        """
        gru_cell_types = [RNNUnitType.GRUCell, RNNUnitType.GRUBlockCell, RNNUnitType.CudnnCompatibleGRUCell]
        for cell_type in gru_cell_types:
            cell_match = self._match_cell(context, cell_type)
            if cell_match:
                self.gru_cell_type = cell_type
                logger.debug("parsing unit is %s", cell_type)
                return cell_match
        logger.debug("cannot parse unit")
        return None

    def get_weight_and_bias(self, context):
        """Collect GRU weight/bias tensors from the matched cell subgraph.

        Returns a dict with keys "gate_kernel", "gate_bias", "hidden_kernel",
        "hidden_bias" (and "Rb_h" for Cudnn cells), or None if any weight is
        missing (i.e. not a constant that could be extracted).
        """
        match = context.cell_match

        gate_kernel = get_weights_from_const_node(self.g, match.get_op("gate_kernel"))
        gate_bias = get_weights_from_const_node(self.g, match.get_op("gate_bias"))
        res = {
            "gate_kernel": gate_kernel,
            "gate_bias": gate_bias
        }

        # differ on memory gate:
        # GRUCell: h'_t = tanh(concat(x_t, r_t .* h_t-1) * W + b)
        # CudnnCompatibleGRUCell: h'_t = tanh(x_t * W_x + b_x + r_t .* (h_t-1 * W_h + b_h))
        if self.gru_cell_type == RNNUnitType.CudnnCompatibleGRUCell:
            # Cudnn keeps input and state kernels/biases separate; fetch all four.
            hidden_state_kernel = get_weights_from_const_node(
                self.g, match.get_op("hidden_state_kernel")
            )
            hidden_state_bias = get_weights_from_const_node(
                self.g, match.get_op("hidden_state_bias")
            )
            hidden_input_kernel = get_weights_from_const_node(
                self.g, match.get_op("hidden_input_kernel")
            )
            hidden_input_bias = get_weights_from_const_node(
                self.g, match.get_op("hidden_input_bias")
            )
            if not all(val is not None for val in [
                    hidden_state_kernel, hidden_state_bias,
                    hidden_input_kernel, hidden_input_bias
            ]):
                logger.debug("rnn weights check failed, skip")
                return None
            # Stack input and state kernels into the combined layout the
            # downstream splitting code expects (rows: input then state).
            hidden_kernel = np.concatenate([hidden_input_kernel, hidden_state_kernel])
            # apply the linear transformation before multiplying by the output of reset gate
            context.attributes["linear_before_reset"] = 1
            res["hidden_kernel"] = hidden_kernel
            res["hidden_bias"] = hidden_input_bias
            # recurrence bias for hidden gate
            res["Rb_h"] = hidden_state_bias
        elif self.gru_cell_type in [RNNUnitType.GRUCell, RNNUnitType.GRUBlockCell]:
            hidden_kernel = get_weights_from_const_node(self.g, match.get_op("hidden_kernel"))
            hidden_bias = get_weights_from_const_node(self.g, match.get_op("hidden_bias"))
            res["hidden_kernel"] = hidden_kernel
            res["hidden_bias"] = hidden_bias

        if not all(val is not None for val in res.values()):
            logger.debug("rnn weights check failed, skip")
            return None

        logger.debug("find needed weights")
        return res

    def _state_variable_finder(self, context):
        """Locate the GRU hidden-state loop variable for the matched cell type.

        Returns whatever the base class's select-based finder returns, or None
        when the cell type is unknown.
        """
        if self.gru_cell_type in [
                RNNUnitType.GRUCell,
                RNNUnitType.CudnnCompatibleGRUCell
        ]:
            gru_cell = context.cell_match
            return self._find_state_variable_with_select(
                context,
                gru_cell.get_op("cell_output").output[0],
                [gru_cell.get_op("cell_inputs")]
            )
        if self.gru_cell_type == RNNUnitType.GRUBlockCell:
            gru_block_cell = context.cell_match.get_op("gru_block_cell")
            # GRUBlockCell's 4th output (index 3) is the new hidden state.
            return self._find_state_variable_with_select(
                context,
                gru_block_cell.output[3],
                [gru_block_cell]
            )
        return None

    def parse_attributes(self, context):
        """Record the ONNX GRU "activations" attribute; always returns True.

        # in tf, only activation of hidden gate is optional, input and update gate always use sigmoid
        """
        match = context.cell_match
        activations = ["Sigmoid", "Tanh"]
        if self.gru_cell_type == RNNUnitType.GRUCell:
            # GRUCell may carry a custom hidden-gate activation in the match.
            activation_op = match.get_op("optional_activation")
            activations = ["Sigmoid", activation_op.type]
        context.attributes["activations"] = activations
        return True

    def is_valid(self, context):
        """Sanity-check the matched loop: bounded extra state vars, at most one scan output."""
        # except for ct, ht or ct_ht, there are at most 2 state variables
        other_state_variables_num = len(context.loop_properties.state_variables) - \
                                    len(context.state_variables)
        if other_state_variables_num > 2:
            logger.debug("found %d other state variables", other_state_variables_num)
            return False

        # output should be no more than 1
        outputs = context.loop_properties.scan_outputs_exits
        if len(outputs) > 1:
            logger.debug("found %d outputs for gru: %s", len(outputs), outputs)
            return False
        return True

    def process_weights_and_bias(self, context):
        """
        Reshape/reorder TF GRU weights into the ONNX GRU layout and create
        const nodes W, R and B.

        Why the data is split this way should be checked against the TensorFlow
        GRU cell source and the ONNX GRU operator specification.
        """
        weights = context.weights
        # from code of tensorflow GRU cell, it can be known that shape of hidden_kernel(or candidate_kernel)
        # is (input_size+hidden_unit, hidden_unit)
        hidden_size = weights["hidden_kernel"].shape[1]
        input_size = weights["hidden_kernel"].shape[0] - hidden_size
        weight_dtype = weights["hidden_kernel"].dtype
        bias_dtype = weights["hidden_bias"].dtype
        # below code will use same notation as ONNX document
        # z means update gate, r means reset gate, h means hidden gate;
        # at this time weights of gate include input and state, will split it next
        r_kernel, z_kernel = np.split(weights["gate_kernel"], [hidden_size], axis=1)
        h_kernel = weights["hidden_kernel"]
        r_bias, z_bias = np.split(weights["gate_bias"], [hidden_size], axis=0)
        h_bias = weights["hidden_bias"]
        # ONNX GRU split weights of input and state, so have to split *_kernel
        input_r_kernel, state_r_kernel = np.split(r_kernel, [input_size], axis=0)
        input_z_kernel, state_z_kernel = np.split(z_kernel, [input_size], axis=0)
        input_h_kernel, state_h_kernel = np.split(h_kernel, [input_size], axis=0)
        # ONNX gate order is z, r, h.
        W_zrh = np.concatenate((input_z_kernel, input_r_kernel, input_h_kernel), axis=1)
        R_zrh = np.concatenate((state_z_kernel, state_r_kernel, state_h_kernel), axis=1)
        # transpose weight matrix
        W_zrh = np.transpose(np.expand_dims(W_zrh, axis=0), axes=(0, 2, 1))
        R_zrh = np.transpose(np.expand_dims(R_zrh, axis=0), axes=(0, 2, 1))
        W_zrh = W_zrh.astype(weight_dtype)
        R_zrh = R_zrh.astype(weight_dtype)
        assert W_zrh.shape == (1, 3*hidden_size, input_size)
        assert R_zrh.shape == (1, 3*hidden_size, hidden_size)
        Wb_zrh = np.concatenate((z_bias, r_bias, h_bias), axis=0)
        # if tf doesn't provide bias for state, use 0
        zero = np.zeros_like(z_bias)
        # Rb_h is set in CudnnCompatibleGRUCell
        Rb_h = weights["Rb_h"] if "Rb_h" in weights else zero
        Rb_zrh = np.concatenate((zero, zero, Rb_h), axis=0)
        # ONNX B is the concatenation of input bias Wb and recurrence bias Rb.
        B_zrh = np.concatenate((Wb_zrh, Rb_zrh), axis=0)
        B_zrh = np.expand_dims(B_zrh, axis=0)
        B_zrh = B_zrh.astype(bias_dtype)
        assert B_zrh.shape == (1, 6*hidden_size)
        # create const ONNX node
        w_name = utils.make_name("W")
        w_node = self.g.make_const(w_name, W_zrh, skip_conversion=True)

        r_name = utils.make_name("R")
        r_node = self.g.make_const(r_name, R_zrh, skip_conversion=True)

        b_name = utils.make_name("B")
        b_node = self.g.make_const(b_name, B_zrh, skip_conversion=True)

        context.input_size = input_size
        context.hidden_size = hidden_size
        context.onnx_input_ids["W"] = w_node.output[0]
        context.onnx_input_ids["R"] = r_node.output[0]
        context.onnx_input_ids["B"] = b_node.output[0]

    def process_var_init_nodes(self, context):
        """Prepare the ONNX GRU "initial_h" input from the loop's state initializer.

        TF state is [batch, hidden]; ONNX expects [num_directions, batch, hidden],
        so a leading axis is added either in the constant value or via Unsqueeze.
        """
        assert "state" in context.state_variables.keys()
        initializer_input_id = context.state_variables["state"].enter_input_id
        node = self.g.get_node_by_output(initializer_input_id)
        if node.is_const():
            # Constant initializer: bake the extra leading axis into a new const.
            val = node.get_tensor_value(as_list=False)
            initial_name = utils.make_name("Const")
            new_val = np.expand_dims(val, axis=0)
            const_node = self.g.make_const(initial_name, new_val)
            context.onnx_input_ids["initial_state"] = const_node.output[0]
            return
        # Dynamic initializer: insert an Unsqueeze(axis=0) in the graph.
        # NOTE(review): variable is named squeeze_node but holds an Unsqueeze op.
        squeeze_node = GraphBuilder(self.g).make_unsqueeze(
            {'data': initializer_input_id, 'axes': [0]}, return_node=True)
        # Rewire all consumers except the new node itself (avoid a self-cycle).
        to_replace = [n for n in self.g.get_nodes() if n != squeeze_node]
        self.g.replace_all_inputs(initializer_input_id, squeeze_node.output[0], ops=to_replace)
        context.onnx_input_ids["initial_state"] = squeeze_node.output[0]

    def create_rnn_node(self, context):
        """Create the ONNX GRU node from the inputs collected in *context*."""
        # specify if the RNN is forward, reverse, or bidirectional.
        # Must be one of forward (default), reverse, or bidirectional.
        # Here we won't mark bidirectional/reverse, we will have another rewriter running after this one,
        # which will based on patterns to combine a forward GRU and a backward GRU into a bidirectional one.
        num_direction = 1
        # todo: input_forget
        context.attributes["direction"] = "forward"
        context.attributes["hidden_size"] = context.hidden_size
        inputs = context.onnx_input_ids
        # sequence length is optional
        seq_len_input = utils.ONNX_EMPTY_INPUT
        if inputs["sequence_lens"]:
            seq_len_input = inputs["sequence_lens"]
        gru_inputs = [
            inputs["X"], inputs["W"], inputs["R"], inputs["B"],
            seq_len_input, inputs["initial_state"]]
        x_shape = self.g.get_shape(gru_inputs[0])
        x_seq_length = x_shape[0]
        x_batch_size = x_shape[1]
        out_dtype = self.g.get_dtype(gru_inputs[0])
        # GRU has two outputs: full sequence Y and last hidden state Y_h.
        gru_node = self.g.make_node("GRU", gru_inputs, attr=context.attributes, output_count=2,
                                    shapes=[[x_seq_length, num_direction, x_batch_size, context.hidden_size],
                                            [num_direction, x_batch_size, context.hidden_size]],
                                    dtypes=[out_dtype, out_dtype], op_name_scope=context.rnn_scope)
        return gru_node

    def _connect_gru_state_to_graph(self, context):
        """Wire the GRU's last-state output (Y_h) to the original state consumers."""
        # in tf, state output shape is: [batch, hidden]
        # in onnx, output shape is: [number_directions, batch, hidden]
        exit_output_id = context.state_variables["state"].exit_output.id
        if not exit_output_id:
            logger.debug("no one consume state variable")
            return
        output_id = context.rnn_node.output[1]
        gru_state_shape = self.g.get_shape(output_id)
        # Drop the leading num_directions axis to match TF's [batch, hidden].
        output_shape = [gru_state_shape[1], gru_state_shape[2]]
        squeeze_node = GraphBuilder(self.g).make_squeeze(
            {'data': output_id, "axes": [0]}, shapes=[output_shape],
            dtypes=[self.g.get_dtype(output_id)], return_node=True)
        self.g.replace_all_inputs(exit_output_id, squeeze_node.output[0])  # ops=self.g.get_nodes()
|
lib/python3.10/site-packages/tf2onnx/rewriter/layer_normalization_rewriter.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewrite - Rewrites a pattern from the tf layer_norm contrib op.
|
| 6 |
+
Converts a mean/variance normalization pattern (using ReduceMean, RSqrt, Sub, Mul, etc.) into InstanceNormalization
|
| 7 |
+
"""
|
| 8 |
+
from onnx import TensorProto, helper
|
| 9 |
+
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
|
| 10 |
+
from tf2onnx.graph_builder import GraphBuilder
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# pylint: disable=missing-docstring
|
| 14 |
+
|
| 15 |
+
def rewrite_layer_normalization(g, ops):
    """Rewrite the TF layer-norm subgraph into InstanceNormalization + Mul + Add.

    Matches the mean/variance normalization pattern (ReduceMean, Rsqrt, Sub,
    Mul, ...) produced by TF layer normalization over the last axis of a
    rank-3 tensor, and replaces it with an ONNX InstanceNormalization whose
    per-channel scale/bias are ones/zeros, followed by the original elementwise
    scale and bias.

    Args:
        g: the tf2onnx Graph being rewritten.
        ops: current list of graph nodes.

    Returns:
        The (possibly modified) list of ops.
    """
    # Needs ConstantOfShape
    if g.opset <= 9:
        return ops

    # Core normalization: rsqrt(mean(square(x - mean(x))) + epsilon).
    inner_pattern = \
        OpTypePattern('Rsqrt', inputs=[
            OpTypePattern('Add', inputs=[
                OpTypePattern('Mean', allow_reorder=False, inputs=[
                    OpTypePattern('Square', inputs=[
                        OpTypePattern('Sub', allow_reorder=False, inputs=[
                            OpTypePattern('*', name='input'),
                            OpTypePattern('Mean', name='mean', allow_reorder=False, inputs=[
                                OpTypePattern('*', name='input_r2'),
                                OpTypePattern('Const|ConstV2', name='mean_axes')
                            ])
                        ])
                    ]),
                    OpTypePattern('Const|ConstV2', name='variance_axes')
                ]),
                OpTypePattern('Const|ConstV2', name='epsilon')
            ])
        ])

    # Three observed factorizations of scale/bias application around the
    # normalization; all are algebraically (x - mean) * rsqrt(...) * scale + bias.
    pattern0 = \
        OpTypePattern('Add', name='bias_add', inputs=[
            OpTypePattern('Mul', name='scale_mul', inputs=[
                OpTypePattern('Mul', inputs=[
                    inner_pattern,
                    OpTypePattern('*', name='scale')
                ]),
                OpTypePattern('Sub', inputs=[
                    OpTypePattern('*', name='input_r3'),
                    OpTypePattern('Mean', name='mean_r2')
                ])
            ]),
            OpTypePattern('*', name='bias')
        ])
    pattern1 = \
        OpTypePattern('Add', name='bias_add', inputs=[
            OpTypePattern('Mul', name='scale_mul', inputs=[
                OpTypePattern('Mul', inputs=[
                    inner_pattern,
                    OpTypePattern('Sub', inputs=[
                        OpTypePattern('*', name='input_r3'),
                        OpTypePattern('Mean', name='mean_r2')
                    ])
                ]),
                OpTypePattern('*', name='scale')
            ]),
            OpTypePattern('*', name='bias'),
        ])
    pattern2 = \
        OpTypePattern('Add', name='bias_add', inputs=[
            OpTypePattern('Mul', name='scale_mul', inputs=[
                OpTypePattern('Mul', inputs=[
                    OpTypePattern('*', name='scale'),
                    OpTypePattern('Sub', inputs=[
                        OpTypePattern('*', name='input_r3'),
                        OpTypePattern('Mean', name='mean_r2')
                    ])
                ]),
                inner_pattern
            ]),
            OpTypePattern('*', name='bias'),
        ])

    pattern_list = [pattern0, pattern1, pattern2]

    for pattern in pattern_list:
        matcher = GraphMatcher(pattern, allow_reorder=True)
        match_results = list(matcher.match_ops(ops))
        if match_results:
            for match in match_results:
                inp_node = match.get_op('input')
                rank = g.get_rank(inp_node.output[0])
                node = match.get_op('bias_add')
                # All three references to the input must be the same tensor,
                # and both Mean ops must be the same node; otherwise the match
                # is not a genuine layer norm.
                if inp_node.name != match.get_op('input_r2').name or inp_node.name != match.get_op('input_r3').name:
                    continue
                if match.get_op('mean').name != match.get_op('mean_r2').name:
                    continue
                inp = match.get_op('mean').input[0]
                # InstanceNormalization rewrite below assumes rank-3 input
                # normalized over the last axis.
                if rank != 3:
                    continue
                mean_axes = match.get_op('mean_axes').get_tensor_value(as_list=True)
                variance_axes = match.get_op('variance_axes').get_tensor_value(as_list=True)
                # Normalize negative axes before comparing.
                mean_axes = [a % rank for a in mean_axes]
                variance_axes = [a % rank for a in variance_axes]
                if mean_axes != [2] or variance_axes != [2]:
                    continue
                epsilon = match.get_op('epsilon').get_tensor_value(as_list=False).flatten().tolist()
                if len(epsilon) != 1:
                    continue
                scale = match.get_op('scale').output[0]
                bias = match.get_op('bias').output[0]
                # InstanceNormalization requires per-channel scale/bias of
                # length C (= dim 1); build ones/zeros of that dynamic size.
                shape = g.make_node("Shape", [inp]).output[0]
                dim_2_shape = GraphBuilder(g).make_slice(
                    {"data": shape, "ends": [2], "starts": [1], "axes": [0]})
                zero_tensor = helper.make_tensor("value", TensorProto.FLOAT, dims=[1], vals=[0])
                one_tensor = helper.make_tensor("value", TensorProto.FLOAT, dims=[1], vals=[1])
                zeros_of_shape = g.make_node("ConstantOfShape", [dim_2_shape], attr={'value': zero_tensor}).output[0]
                ones_of_shape = g.make_node("ConstantOfShape", [dim_2_shape], attr={'value': one_tensor}).output[0]
                norm = g.make_node("InstanceNormalization", [inp, ones_of_shape, zeros_of_shape],
                                   attr={'epsilon': epsilon[0]}, op_name_scope=node.name).output[0]
                # Re-apply the original elementwise scale and bias.
                mul = g.make_node("Mul", [norm, scale]).output[0]
                add = g.make_node("Add", [mul, bias]).output[0]
                g.replace_all_inputs(node.output[0], add)
                g.remove_node(node.name)
    return ops
|
lib/python3.10/site-packages/tf2onnx/rewriter/leakyrelu_rewriter.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter - rewrite tensorflow subgraph to onnx leakyrelu op
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# pylint: disable=missing-docstring
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def rewrite_leakyrelu(g, ops):
    """Fuse the TF subgraph ``Maximum(alpha * x, x)`` into one ONNX LeakyRelu node.

    Args:
        g: the tf2onnx Graph being rewritten.
        ops: current list of graph nodes.

    Returns:
        The (possibly modified) list of ops.
    """
    # LeakyRelu is only available from opset 6 onward.
    if g.opset < 6:
        return ops

    pattern = \
        OpTypePattern('Maximum', name='max', inputs=[
            OpTypePattern('Mul', name='mul', inputs=[
                OpTypePattern('Const', name='alpha'),
                OpTypePattern('*', name='mul_input'),
            ]),
            OpTypePattern('*', name='max_input'),
        ])

    matcher = GraphMatcher(pattern, allow_reorder=True)
    for match in list(matcher.match_ops(ops)):
        maximum_op = match.get_op('max')
        multiply_op = match.get_op("mul")

        direct_edge = _find_edge_name_between_nodes(match.get_op('max_input'), maximum_op)
        scaled_edge = _find_edge_name_between_nodes(match.get_op('mul_input'), multiply_op)
        if direct_edge != scaled_edge:
            # Maximum and Mul must consume the very same tensor for the
            # subgraph to mean max(x, alpha*x).
            continue
        alpha = match.get_op("alpha").get_tensor_value()
        if alpha >= 1:
            # max(x, alpha*x) is only equivalent to LeakyRelu when alpha < 1.
            continue
        old_output = maximum_op.output[0]
        leakyrelu = g.make_node("LeakyRelu", inputs=[direct_edge], attr={"alpha": alpha},
                                shapes=[g.get_shape(old_output)], dtypes=[g.get_dtype(old_output)])
        ops.append(leakyrelu)
        g.replace_all_inputs(old_output, leakyrelu.output[0], ops=ops)
        g.safe_remove_nodes([maximum_op, multiply_op])

    return ops
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def _find_edge_name_between_nodes(src_node, consumer_node):
|
| 52 |
+
# find the first edge connection between two nodes.
|
| 53 |
+
for consumer_end in consumer_node.input:
|
| 54 |
+
for src_end in src_node.output:
|
| 55 |
+
if consumer_end == src_end:
|
| 56 |
+
return consumer_end
|
| 57 |
+
return None
|
lib/python3.10/site-packages/tf2onnx/rewriter/loop_rewriter.py
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter.loop_rewriter - generic loop support
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
|
| 11 |
+
import logging
|
| 12 |
+
import sys
|
| 13 |
+
import traceback
|
| 14 |
+
|
| 15 |
+
from onnx import TensorProto
|
| 16 |
+
import numpy as np
|
| 17 |
+
|
| 18 |
+
from tf2onnx.rewriter.loop_rewriter_base import LoopRewriterBase, Context
|
| 19 |
+
from tf2onnx.rewriter.rnn_utils import REWRITER_RESULT
|
| 20 |
+
from tf2onnx import utils
|
| 21 |
+
from tf2onnx.graph_builder import GraphBuilder
|
| 22 |
+
|
| 23 |
+
logger = logging.getLogger(__name__)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,broad-except,protected-access
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class LoopRewriter(LoopRewriterBase):
|
| 30 |
+
|
| 31 |
+
def create_context(self):
|
| 32 |
+
return Context()
|
| 33 |
+
|
| 34 |
+
def run(self):
|
| 35 |
+
logger.debug("enter loop rewriter")
|
| 36 |
+
return self.run_internal()
|
| 37 |
+
|
| 38 |
+
def need_rewrite(self, context):
|
| 39 |
+
return True
|
| 40 |
+
|
| 41 |
+
def rewrite(self, context):
|
| 42 |
+
logger.debug("enter rewrite function")
|
| 43 |
+
loop_node = None
|
| 44 |
+
try:
|
| 45 |
+
loop_props = context.loop_properties
|
| 46 |
+
cell_g_info = context.cell_graph
|
| 47 |
+
cond_g_info = context.cond_graph
|
| 48 |
+
|
| 49 |
+
# create a dummy loop to calculate the init condition
|
| 50 |
+
init_cond_output = self._create_subgraph_initial_cond(cond_g_info)
|
| 51 |
+
|
| 52 |
+
## create Loop body graph with existing nodes
|
| 53 |
+
|
| 54 |
+
body_nodes = set(cell_g_info.nodes + cond_g_info.nodes)
|
| 55 |
+
body_outputs = cond_g_info.outputs + cell_g_info.outputs
|
| 56 |
+
for out_tensor_value_info in body_outputs:
|
| 57 |
+
shape = out_tensor_value_info.shape
|
| 58 |
+
utils.make_sure(
|
| 59 |
+
shape is not None,
|
| 60 |
+
"Conversion of Loop requries output shape [{}] exists".format(out_tensor_value_info.id)
|
| 61 |
+
)
|
| 62 |
+
out_tensor_value_info.shape = utils.create_vague_shape_like(shape)
|
| 63 |
+
|
| 64 |
+
loop_body_g = LoopRewriterBase.construct_graph_from_nodes(self.g, body_nodes, body_outputs)
|
| 65 |
+
|
| 66 |
+
# create loop body graph inputs
|
| 67 |
+
loop_body_g.add_graph_input(utils.make_name("i"), TensorProto.INT64, ())
|
| 68 |
+
loop_body_g.add_graph_input(utils.make_name("cond"), TensorProto.BOOL, ())
|
| 69 |
+
for i, tensor_value_info in enumerate(loop_props.state_inputs):
|
| 70 |
+
input_name = tensor_value_info.id
|
| 71 |
+
if input_name is None:
|
| 72 |
+
# if the variable is not used in the body graph, then we created a fake one,
|
| 73 |
+
# the same type and shape as its corresponding output.
|
| 74 |
+
out_tensor_value_info = loop_props.state_outputs[i]
|
| 75 |
+
dtype = out_tensor_value_info.dtype
|
| 76 |
+
shape = out_tensor_value_info.shape
|
| 77 |
+
input_name = utils.make_name("unused_state_input_")
|
| 78 |
+
else:
|
| 79 |
+
dtype = tensor_value_info.dtype
|
| 80 |
+
shape = tensor_value_info.shape
|
| 81 |
+
|
| 82 |
+
loop_body_g.add_graph_input(input_name, dtype, utils.create_vague_shape_like(shape))
|
| 83 |
+
|
| 84 |
+
for input_ta in loop_props.tensor_array_inputs:
|
| 85 |
+
# Loop does not have scan inputs, so we use Gather to get data for each iteration.
|
| 86 |
+
gb = GraphBuilder(loop_body_g)
|
| 87 |
+
index_node = gb.make_unsqueeze({'data': input_ta.index_input_id, "axes": [0]}, return_node=True)
|
| 88 |
+
gather_node = loop_body_g.make_node("Gather", [input_ta.data_input_id, index_node.output[0]])
|
| 89 |
+
data_node = gb.make_squeeze({'data': gather_node.output[0], "axes": [0]}, return_node=True)
|
| 90 |
+
loop_body_g.replace_all_inputs(input_ta.consumer.id, data_node.output[0]) # ops=loop_body_g.get_nodes()
|
| 91 |
+
|
| 92 |
+
## create Loop node
|
| 93 |
+
branches = {"body": loop_body_g}
|
| 94 |
+
loop_node = self._create_loop_node(context, loop_props, init_cond_output, branches=branches)
|
| 95 |
+
if not loop_node:
|
| 96 |
+
logger.error("failed to create loop node during rewrite")
|
| 97 |
+
return REWRITER_RESULT.FAIL
|
| 98 |
+
|
| 99 |
+
logger.debug("rewrite successfully")
|
| 100 |
+
return REWRITER_RESULT.OK
|
| 101 |
+
|
| 102 |
+
except Exception as ex:
|
| 103 |
+
tb = traceback.format_exc()
|
| 104 |
+
logger.error("loop rewrite failed, due to exception: %s, details:%s", ex, tb)
|
| 105 |
+
return REWRITER_RESULT.FAIL
|
| 106 |
+
|
| 107 |
+
def _create_subgraph_initial_cond(self, cond_graph):
|
| 108 |
+
"""Create subgraph to calculate initial cond."""
|
| 109 |
+
# copy condition subgraph to parent graph
|
| 110 |
+
copied_nodes = []
|
| 111 |
+
name_scope = utils.make_name("copy")
|
| 112 |
+
for node in cond_graph.nodes:
|
| 113 |
+
new_name = "{}/{}".format(name_scope, node.name)
|
| 114 |
+
new_outputs = ["{}/{}".format(name_scope, out) for out in node.output]
|
| 115 |
+
# some inputs are out of cond_graph.nodes, keep them intact
|
| 116 |
+
new_inputs = []
|
| 117 |
+
for inp in node.input:
|
| 118 |
+
if self.g.get_node_by_output(inp) in cond_graph.nodes:
|
| 119 |
+
new_inputs.append("{}/{}".format(name_scope, inp))
|
| 120 |
+
else:
|
| 121 |
+
new_inputs.append(inp)
|
| 122 |
+
|
| 123 |
+
new_node = self.g.make_node(
|
| 124 |
+
node.type, new_inputs, outputs=new_outputs,
|
| 125 |
+
attr=node.attr, name=new_name,
|
| 126 |
+
shapes=node.output_shapes, dtypes=node.output_dtypes,
|
| 127 |
+
skip_conversion=node.skip_conversion, infer_shape_dtype=False
|
| 128 |
+
)
|
| 129 |
+
body_graphs = node.graph.contained_graphs.pop(node.name, None)
|
| 130 |
+
if body_graphs:
|
| 131 |
+
for attr_name, body_graph in body_graphs.items():
|
| 132 |
+
body_graph.parent_graph = self.g
|
| 133 |
+
new_node.set_body_graph_as_attr(attr_name, body_graph)
|
| 134 |
+
copied_nodes.append(new_node)
|
| 135 |
+
|
| 136 |
+
# replace all inputs of condition graph by initializer (enter_input)
|
| 137 |
+
for loop_var in cond_graph.dependent_vars:
|
| 138 |
+
self.g.replace_all_inputs(
|
| 139 |
+
loop_var.next_iteration_input.id,
|
| 140 |
+
loop_var.enter_input_id, ops=copied_nodes)
|
| 141 |
+
init_cond_output = "{}/{}".format(name_scope, cond_graph.outputs[0].id)
|
| 142 |
+
self.g.set_dtype(init_cond_output, cond_graph.outputs[0].dtype)
|
| 143 |
+
self.g.set_shape(init_cond_output, cond_graph.outputs[0].shape)
|
| 144 |
+
return init_cond_output
|
| 145 |
+
|
| 146 |
+
def _create_loop_node(self, context, loop_props, init_cond_output, branches=None):
    """Build the ONNX Loop node for this while-loop.

    Collects state and scan exit outputs as the Loop node's outputs
    (removing the TF nodes that previously produced them), then creates
    the Loop with a dummy max-trip-count input. `branches`, if given, is
    forwarded to make_node to attach the body graph(s).
    """
    loop_outputs = []
    loop_output_shapes = []
    loop_output_dtypes = []
    for tensor_value_info in loop_props.state_outputs_exits + loop_props.scan_outputs_exits:
        if tensor_value_info.id:
            # reuse the existing exit output id so downstream consumers
            # keep working; the old producer node must go away first
            loop_outputs.append(tensor_value_info.id)
            loop_output_shapes.append(tensor_value_info.shape)
            loop_output_dtypes.append(tensor_value_info.dtype)
            n = self.g.get_node_by_output(tensor_value_info.id)
            self.g.remove_node(n.name)
        else:
            # nobody consumes this output: give it a placeholder name
            loop_outputs.append(utils.make_name("unused_loop_output_"))
            loop_output_shapes.append([-1])
            loop_output_dtypes.append(None)

    # trip count and cond are not used, giving them values just because bug
    # (https://github.com/Microsoft/onnxruntime/issues/255) of onnxruntime.
    trip_cnt = self.g.make_const(utils.make_name("trip_count"), np.array(sys.maxsize, dtype=np.int64))
    loop_node = self.g.make_node("Loop", [trip_cnt.output[0]] + [init_cond_output] +
                                 loop_props.state_inputs_initial_values,  # ONNX Loop support state inputs only
                                 outputs=loop_outputs, op_name_scope="generic_loop",
                                 shapes=loop_output_shapes, dtypes=loop_output_dtypes,
                                 skip_conversion=False, branches=branches)

    return loop_node
|
lib/python3.10/site-packages/tf2onnx/rewriter/loop_rewriter_base.py
ADDED
|
@@ -0,0 +1,451 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter.loop_rewriter_base
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
|
| 11 |
+
import logging
|
| 12 |
+
from collections import OrderedDict
|
| 13 |
+
from tf2onnx import utils
|
| 14 |
+
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
|
| 15 |
+
from tf2onnx.utils import is_tf_loopcond_op, is_tf_tensor_array_op
|
| 16 |
+
from tf2onnx.utils import is_tf_tensor_array_gather_op, is_tf_tensor_array_write_op
|
| 17 |
+
from tf2onnx.rewriter.rnn_utils import REWRITER_RESULT
|
| 18 |
+
from tf2onnx.utils import TensorValueInfo
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
logger = logging.getLogger(__name__)
|
| 22 |
+
INVALID_INPUT_ID = utils.make_name("invalid_input_id")
|
| 23 |
+
|
| 24 |
+
# todo(pengwa) remove protected-access with changes to Graph/Node later.
|
| 25 |
+
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,protected-access
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class Context(object):
    """Mutable state threaded through a single loop-rewrite attempt."""

    def __init__(self):
        # name scope of the TF while-loop being rewritten
        self.while_context_scope = None
        # the LoopCond node anchoring this rewrite
        self.loop_cond = None
        # bookkeeping for state/scan loop variables
        self.loop_properties = LoopProperties()

        # GraphInfo of the loop-body (cell) subgraph, filled in later
        self.cell_graph = None
        # GraphInfo of the condition subgraph, filled in later
        self.cond_graph = None
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class GraphInfo(object):
    """Lightweight record describing a cropped subgraph and its boundary."""

    def __init__(self, ops, inputs, outputs):
        # ordered TensorValueInfo lists describing the subgraph boundary
        self.inputs = inputs
        self.outputs = outputs
        # the nodes that make up the subgraph
        self.nodes = ops
        # loop variables the subgraph depends on; caller fills this in later
        self.dependent_vars = None
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class LoopProperties(object):
    """Collects the loop variables discovered for one TF while-loop.

    Variables are keyed by their Enter node's name rather than by
    enter_input_id, because a single initial value may feed more than one
    Enter node.
    """

    def __init__(self):
        self.state_variables = OrderedDict()
        self.scan_variables = OrderedDict()

        # list of InputTensorArray treated as scan inputs
        self.tensor_array_inputs = []

    def add_variable(self, var):
        """Register a loop variable, routing tensor arrays to scan storage."""
        utils.make_sure(var.enter_name not in self.scan_variables,
                        "variable %s already exists as scan variable.", var.enter_name)
        utils.make_sure(var.enter_name not in self.state_variables,
                        "variable %s already exists as state variable.", var.enter_name)
        target = self.scan_variables if var.is_tensor_array else self.state_variables
        target[var.enter_name] = var

    def get_variables(self, checker):
        """Return all variables, optionally filtered by predicate *checker*."""
        candidates = self.all_variables.values()
        if not checker:
            return candidates
        return [v for v in candidates if checker(v)]

    @property
    def all_variables(self):
        """State variables followed by scan variables, as one OrderedDict."""
        merged = self.state_variables.copy()
        merged.update(self.scan_variables)
        return merged

    # State inputs and outputs come in pairs; an output that does not depend
    # on its corresponding input leaves the input id as None.
    @property
    def state_inputs(self):
        return [sv.switch_true_identity_output for sv in self.state_variables.values()]

    @property
    def state_inputs_initial_values(self):
        return [sv.enter_input_id for sv in self.state_variables.values()]

    @property
    def state_outputs(self):
        return [sv.next_iteration_input for sv in self.state_variables.values()]

    @property
    def state_outputs_exits(self):
        return [sv.exit_output for sv in self.state_variables.values()]

    # Scan outputs (tensor arrays) are never fed back into the next iteration.
    @property
    def scan_outputs(self):
        return [cv.next_iteration_input for cv in self.scan_variables.values()]

    @property
    def scan_outputs_exits(self):
        return [cv.exit_output for cv in self.scan_variables.values()]

    def add_scan_input(self, input_tensor_array):
        """Treat an input tensor array as a scan input."""
        self.tensor_array_inputs.append(input_tensor_array)

    # The consumer is usually a TensorArrayReadV3.
    @property
    def scan_inputs(self):
        return [ta.consumer for ta in self.tensor_array_inputs]

    @property
    def scan_inputs_initial_values(self):
        return [ta.data_input_id for ta in self.tensor_array_inputs]
|
| 116 |
+
|
| 117 |
+
class LoopVariable(object):
    """In TensorFlow loop, all loop variables are listed both in iteration body graph's inputs, and outputs.
    Loop (state variable 1, state variable 2) {
        # do the calculation
        # updated state variable 1 not necessarily only depends on state variable 1, it might depend
        # on 0, 1 or more state variables.
        # So if it depends on 0 state variable, then switch_true_identity_output.id is None. For this case,
        # during conversion, a fake input for ONNX Loop body graph is created, but not consumed by any node.
        return (updated) state variable 1, (updated) state variable 2, scan variable 1, scan variable 2
    }

    Here we take the perspective of body graph's outputs:
    1. start from the iteration body graph's output (e.g. next_iteration_input.id)
    2. find body graph generating it (those node between NextIteration and Switch)
    3. find the variable initial value (e.g. enter_input_id)
    4. check whether it is a tensor array
    5. the body graph output might go to next iteration as corresponding input
       (e.g. switch_true_identity_output.id).
    """
    def __init__(self, enter_name, enter_input_id, next_iteration_input_id,
                 switch_true_identity_output_id, exit_output_id, is_tensor_array, ta_index_id, g):
        # key of this variable (Enter node name) and its initial value
        self.enter_name = enter_name
        self.enter_input_id = enter_input_id

        # the output of iteration body graph for this variable
        # should not be None
        utils.make_sure(next_iteration_input_id, "next_iteration_input_id should not be None")
        self.next_iteration_input = TensorValueInfo(next_iteration_input_id, g)

        # the starting point of iteration body graph,
        # might be None when this variable value (either initial value or last iteration output value)
        # is not consumed iteration body graph nodes.
        self.switch_true_identity_output = TensorValueInfo(switch_true_identity_output_id, g)

        # the switch_false branch is ended with Exit, which is a boundary for the loop,
        # might be None when no consumers for the variable output.
        self.exit_output = TensorValueInfo(exit_output_id, g)

        # only applicable for tensor array variable
        self.is_tensor_array = is_tensor_array
        # todo: need check ta's index variable is a scalar starting from 1, and increase by 1 each iteration.
        # then we can be sure this is equivalent to scan output behavior.
        self.ta_index_id = ta_index_id
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
class InputTensorArray(object):
    """Describes a tensor array fed into the loop, treated as a scan input."""

    def __init__(self, data_input_id, index_input_id, consumer_id, g):
        # id of the per-iteration index tensor used to read from the array
        self.index_input_id = index_input_id
        # id of the full (stacked) data tensor scattered into the array
        self.data_input_id = data_input_id

        # tensor array is unstacked before being used in loop, consumer_id is the node
        # (in the iteration body graph) consuming one of the element of tensor array.
        self.consumer = TensorValueInfo(consumer_id, g)
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
class LoopRewriterBase(object):
    """Base class for rewriting TF v1 control-flow loops (LoopCond/Switch/
    Merge/Enter/NextIteration/Exit) into ONNX Loop form.

    Subclasses override need_rewrite() and rewrite(); this class finds the
    loops, parses their variables, and crops the body/condition subgraphs.
    """

    def __init__(self, g):
        self.g = g
        # pattern matching a tensor-array read fed by an unstacked (scattered)
        # input tensor array -- used to detect scan inputs
        self.ta_read_input_pattern = \
            OpTypePattern("TensorArrayReadV3", name="ta_read", inputs=[
                OpTypePattern("Enter", name="ta_enter", inputs=[
                    OpTypePattern("TensorArrayV3")
                ]),
                OpTypePattern("Identity", name="ta_index"),
                OpTypePattern("Enter", name="ta_scatter_enter", inputs=[
                    OpTypePattern("TensorArrayScatterV3", name="ta_input_scatter")
                ]),
            ])

    def create_context(self):
        """Create a fresh per-loop Context; subclasses may specialize it."""
        return Context()

    def need_rewrite(self, context):
        """Return True if this rewriter handles the loop; overridden by subclasses."""
        return False

    def rewrite(self, context):
        """Perform the rewrite; overridden by subclasses."""
        return REWRITER_RESULT.FAIL

    def run_internal(self):
        """Drive the rewrite over every LoopCond op found in the graph.

        Returns the (possibly modified) node list of the graph.
        Raises ValueError if a subclass rewrite reports FAIL.
        """
        loopcond_ops = []
        for op in self.g.get_nodes():
            if is_tf_loopcond_op(op):
                loopcond_ops.append(op)

        # self.g.get_nodes may change inside this loop so that we parse all LoopCond first
        for op in loopcond_ops:
            logger.debug("======================\n handling loop cond node called %s", op.name)
            context = self.create_context()
            context.loop_cond = op

            # parse variables and scan inputs without mutating the graph
            self._check_in_read_only_mode(context)

            if self.need_rewrite(context):
                # cut off connection between cell/cond graphs and useless nodes like Merge, NextIteration.
                self._cut_off_connection_for_cell(context)
                context.cell_graph = self._crop_loop_body_sub_graph(context)
                context.cond_graph = self._crop_loop_condition_sub_graph(context)

                _result = self.rewrite(context)
                if _result == REWRITER_RESULT.OK:
                    logger.debug("rewrite successfully")
                elif _result == REWRITER_RESULT.SKIP:
                    logger.debug("rewrite skipped for LoopCond called %s", op.name)
                    continue
                elif _result == REWRITER_RESULT.FAIL:
                    raise ValueError("rewrite failed, so just fast fail it")

        if self.g.outputs:
            # clean the graph based on output names.
            self.g.delete_unused_nodes(self.g.outputs)
        return self.g.get_nodes()

    def _check_in_read_only_mode(self, context):
        """Populate context by inspection only -- no graph mutation here."""
        self._parse_loop_variables(context)
        self._parse_input_ta(context)

    def _parse_loop_variables(self, context):
        """Find the Switch nodes driven by LoopCond and record one LoopVariable each."""
        loop_cond_op = context.loop_cond
        # the while-loop scope is the LoopCond node's name minus its last segment
        parts = loop_cond_op.name.split('/')
        context.while_context_scope = '/'.join(parts[0:-1]) + "/"
        logger.debug("found while loop scope %s", context.while_context_scope)

        switch_nodes = self.g.find_output_consumers(loop_cond_op.output[0])
        for s in switch_nodes:
            if s.type != 'Switch':
                raise ValueError("LoopCond's output node should be followed with a Switch node")

            loop_var = self._get_loop_var_from_switch(s)
            context.loop_properties.add_variable(loop_var)

    def _parse_input_ta(self, context):
        """Detect input tensor arrays read inside the loop and record them as scan inputs."""
        # only reads whose index comes from a loop-variable input count
        graph_inputs = [v.switch_true_identity_output.id for v in context.loop_properties.all_variables.values()
                        if v.switch_true_identity_output.id]
        matcher = GraphMatcher(self.ta_read_input_pattern, allow_reorder=False)
        match_results = matcher.match_ops(self.g.get_nodes())
        match_results = [r for r in match_results if r.get_op("ta_index").output[0] in graph_inputs]
        for match in match_results:
            ta_input_scatter = match.get_op("ta_input_scatter")
            # the 3rd input of scatter is the value
            data_input_id = ta_input_scatter.input[2]
            ta_read_node = match.get_op("ta_read")

            # todo: need check ta's index variable is a scalar starting from 1, and increase by 1 each iteration.
            # then we can be sure this is equivalent to scan input behavior.
            index_input_id = ta_read_node.input[1]
            unstacked_ta_consumer = match.get_op("ta_read").output[0]
            ta = InputTensorArray(data_input_id, index_input_id, unstacked_ta_consumer, self.g)
            context.loop_properties.add_scan_input(ta)

    def _crop_loop_body_sub_graph(self, context):
        """Crop the iteration-body subgraph between loop inputs and outputs."""
        # according to input and output, find the body graph
        loop_props = context.loop_properties
        inputs = loop_props.state_inputs + loop_props.scan_inputs
        input_ids = [input_tensor_value_info.id for input_tensor_value_info in inputs]

        outputs = loop_props.state_outputs + loop_props.scan_outputs
        output_ids = [out_tensor_value_info.id for out_tensor_value_info in outputs]
        ops, enter_nodes, _ = self.find_subgraph(set(input_ids), set(output_ids), self.g, merge_as_end=False)

        for enter_node in enter_nodes:
            # connect Enter's output to Enter's input
            self.g.replace_all_inputs(enter_node.output[0], enter_node.input[0], ops=ops)

        return GraphInfo(ops, inputs, outputs)

    def _crop_loop_condition_sub_graph(self, context):
        """Crop the condition subgraph ending at LoopCond's input.

        Also records which loop variables the condition depends on
        (via Merge nodes) in the returned GraphInfo.dependent_vars.
        """
        input_ids = []
        output_ids = [context.loop_cond.input[0]]
        outputs = [TensorValueInfo(o, self.g) for o in output_ids]
        ops, enter_nodes, merge_nodes = self.find_subgraph(set(input_ids), set(output_ids), self.g, merge_as_end=True)

        for enter_node in enter_nodes:
            # connect Enter's output to Enter's input
            self.g.replace_all_inputs(enter_node.output[0], enter_node.input[0], ops=ops)

        dependent_vars = []
        for merge_node in merge_nodes:
            enter_node = [n for n in merge_node.inputs if n.type == "Enter"][0]
            loop_var = context.loop_properties.all_variables[enter_node.name]

            # cut off connection between condition graph and Merge node.
            # replace condition graph's inputs to be cell graph's outputs, because we want condition graph
            # to consumer cell graph outputs.
            non_switch_consumers = [n for n in self.g.find_output_consumers(merge_node.output[0]) if n.type != "Switch"]
            self.g.replace_all_inputs(merge_node.output[0], loop_var.next_iteration_input.id,
                                      ops=non_switch_consumers)
            dependent_vars.append(loop_var)

        # cut off connection between condition graph and LoopCond node.
        self.g.replace_all_inputs(context.loop_cond.output[0], INVALID_INPUT_ID, ops=[context.loop_cond])

        graph_info = GraphInfo(ops, [], outputs)
        graph_info.dependent_vars = dependent_vars
        return graph_info

    def _cut_off_connection_for_cell(self, context):
        """Disconnect the loop body from the TF control-flow plumbing
        (Switch/NextIteration) so it can be cropped as a standalone subgraph."""
        for val in context.loop_properties.all_variables.values():
            if val.switch_true_identity_output.id:
                # remove the node to cut off a starting node of the cell (e.g. loop body).
                n = self.g.get_node_by_output(val.switch_true_identity_output.id)
                self.g.remove_node(n.name)

            if val.is_tensor_array:
                # connect NextIteration to an invalid node, to cut off an ending node of the cell.
                ta_write_nodes = [n for n in self.g.get_nodes() if is_tf_tensor_array_write_op(n)]
                self.g.replace_all_inputs(val.next_iteration_input.id, INVALID_INPUT_ID, ops=ta_write_nodes)
            else:
                # connect NextIteration to an invalid node, to cut off an ending node of the cell.
                next_iter_nodes = [n for n in self.g.get_nodes() if n.type == "NextIteration"]
                self.g.replace_all_inputs(val.next_iteration_input.id, INVALID_INPUT_ID, ops=next_iter_nodes)

        for scan_input in context.loop_properties.scan_inputs:
            # remove the node to cut off connection between scan_input and the cell.
            self.g.remove_node(self.g.get_node_by_output(scan_input.id).name)

    def _get_loop_var_from_switch(self, switch_node):
        """Build a LoopVariable from a Switch node driven by LoopCond.

        Returns None when the node does not fit the expected
        Switch << Merge << (Enter, NextIteration) shape.
        """
        if switch_node.type != 'Switch':
            logger.error("not a switch node, skip")
            return None

        # the first input is data
        merge_node = switch_node.inputs[0]
        if merge_node.type != "Merge":
            logger.error("switch node does not has Merge as its first input")
            return None

        # find the output_true consumers
        switch_consumers = self.g.find_output_consumers(switch_node.output[1])
        switch_true_consumer_cnt = len(switch_consumers)
        if switch_true_consumer_cnt == 0:
            # the loop-body never reads this variable
            switch_true_identity_output = None
        elif switch_true_consumer_cnt == 1:
            if switch_consumers[0].type == "Identity":
                switch_true_identity_output = switch_consumers[0].output[0]
            else:
                # using grappler there is not necessarily an identity behind switch
                switch_true_identity_output = switch_node.output[1]
        else:
            # insert identity if there are 2 or more consumers. This can happen on tf-1.15.
            switch_true_identity_output = self.g.make_node("Identity", [switch_node.output[1]],
                                                           shapes=[switch_node.output_shapes[1]],
                                                           dtypes=[switch_node.output_dtypes[1]])
            switch_true_identity_output = switch_true_identity_output.output[0]
            # rewire every consumer to go through the new Identity
            for n in switch_consumers:
                for i, nn in enumerate(n.input):
                    if nn == switch_node.output[1]:
                        n.input[i] = switch_true_identity_output

        target_node_input_id = None
        enter_node = [n for n in merge_node.inputs if n.type == 'Enter'][0]
        # the Enter's input is the variable's initial value
        target_node_input_id = enter_node.input[0]
        logger.debug("a Switch >> Merge >> Enter is found called %s", enter_node.inputs[0].name)

        next_iteration_node = [n for n in merge_node.inputs if n.type == 'NextIteration'][0]
        last_iteration_output_id = next_iteration_node.input[0]

        # find the output_false consumers to see whether there is consumer for this var
        switch_false_consumers = self.g.find_output_consumers(switch_node.output[0])
        false_consumer_count = len(switch_false_consumers)
        exit_output_id = None
        if false_consumer_count == 1:
            exit_node = switch_false_consumers[0]
            if exit_node.type != "Exit":
                raise ValueError("switch false branch is followed by non-Exit")
            exit_output_id = exit_node.output[0]
        elif false_consumer_count == 0:
            # sometime, the variable output won't be used in the new iteration as input.
            exit_output_id = None
        else:
            raise ValueError("unexpected number of switch false consumers")

        is_ta = False
        ta_index_id = None
        if is_tf_tensor_array_op(self.g.get_node_by_output(target_node_input_id)):
            is_ta = True

            ta_write_node = self.g.get_node_by_output(last_iteration_output_id)
            utils.make_sure(is_tf_tensor_array_write_op(ta_write_node), "ta nextiteration is not following ta write op")
            # for a tensor array, the written value and index are the real outputs
            last_iteration_output_id = ta_write_node.input[2]
            ta_index_id = ta_write_node.input[1]

            # here we parse patterns generated by
            # ta.write(), then ta.stack(), because this is the most frequent usage pattern.
            if exit_output_id:
                exit_consumers = self.g.find_output_consumers(exit_output_id)
                ta_gather_node = [n for n in exit_consumers if is_tf_tensor_array_gather_op(n)][0]

                # update exit output id, treat the gather output as ta's output
                exit_output_id = ta_gather_node.output[0]

        loop_var = LoopVariable(enter_node.name, target_node_input_id, last_iteration_output_id,
                                switch_true_identity_output, exit_output_id, is_ta, ta_index_id, self.g)

        return loop_var

    @staticmethod
    def find_subgraph(input_ids, output_ids, g, merge_as_end=False):
        """Walk backwards from output_ids, stopping at input_ids, Enter nodes,
        consts and (optionally) Merge nodes.

        Returns (nodes, enter_nodes, merge_nodes) where enter_nodes and
        merge_nodes are the boundary nodes encountered during the walk.
        """
        logger.debug("input ids %s ", input_ids)
        logger.debug("output ids %s ", output_ids)

        enter_nodes = set()
        merge_nodes = set()

        def find_input_boundary(node):
            # returning False stops the backward traversal at this node
            if node.type == "Enter":
                enter_nodes.add(node)
                logger.debug("terminate the input search at %s", node.name)
                return False

            if merge_as_end is True and node.type == "Merge":
                merge_nodes.add(node)
                logger.debug("terminate the input search at %s", node.name)
                return False

            if node.is_const():
                logger.debug("terminate search at const node %s", node.name)
                return False

            for o in node.output:
                if o in input_ids:
                    return False
            return True

        nodes = g.extract_sub_graph_nodes(output_ids, input_checker=find_input_boundary)
        return nodes, enter_nodes, merge_nodes

    @staticmethod
    def construct_graph_from_nodes(parent_g, nodes, outputs):
        """Build a standalone Graph from *nodes* with the given TensorValueInfo outputs."""
        return utils.construct_graph_from_nodes(
            parent_g,
            nodes,
            [out.id for out in outputs],
            [out.shape for out in outputs],
            [out.dtype for out in outputs]
        )
|
lib/python3.10/site-packages/tf2onnx/rewriter/lstm_rewriter.py
ADDED
|
@@ -0,0 +1,433 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter.lstm_rewriter
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
from __future__ import unicode_literals
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
import numpy as np
|
| 14 |
+
from tf2onnx import utils
|
| 15 |
+
from tf2onnx.graph_builder import GraphBuilder
|
| 16 |
+
from tf2onnx.rewriter.rnn_utils import RNNUnitType, get_weights_from_const_node
|
| 17 |
+
from tf2onnx.utils import is_tf_concat_op, is_tf_slice_op
|
| 18 |
+
|
| 19 |
+
from tf2onnx.rewriter.lstm_rewriter_base import LSTMRewriterBase
|
| 20 |
+
|
| 21 |
+
# pylint: disable=invalid-name,unused-argument,missing-docstring
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
logger = logging.getLogger(__name__)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class LSTMRewriter(LSTMRewriterBase):
|
| 28 |
+
def __init__(self, g):
    """Initialize the LSTM rewriter over graph *g*."""
    super(LSTMRewriter, self).__init__(g)
    # number of stacked LSTM layers matched; set by find_cell
    self.num_lstm_layers = 0
    # which RNNUnitType variant was matched; set by find_cell
    self.lstm_cell_type = None
|
| 32 |
+
|
| 33 |
+
def run(self):
    """Entry point: log, then delegate to the base-class rewrite driver."""
    logger.debug("enter lstm rewriter")
    outcome = super(LSTMRewriter, self).run()
    return outcome
|
| 36 |
+
|
| 37 |
+
def find_cell(self, context):
    """Try to match an LSTM cell pattern in the loop body.

    Tries LSTMCell first, then LSTMBlockCell. On a match, records the
    number of layers, registers per-layer state-variable handlers, stores
    the matched cell type and returns the match list; returns None when
    nothing matched.
    """
    lstm_cell_types = [RNNUnitType.LSTMCell, RNNUnitType.LSTMBlockCell]
    for cell_type in lstm_cell_types:
        cell_match = self._match_cell(context, cell_type)
        if cell_match and len(cell_match) >= 1:
            self.num_lstm_layers = len(cell_match)
            logger.debug("number of LSTM layers: %s", self.num_lstm_layers)
            for i in range(self.num_lstm_layers):
                # per-layer handlers: separate ct/ht variables ...
                self.state_variable_handlers.append({
                    "ct" + str(i): (self._ct_variable_finder, self._connect_lstm_yc_to_graph, i),
                    "ht" + str(i): (self._ht_variable_finder, self._connect_lstm_yh_to_graph, i)
                })
                # ... or a single shared ct_ht variable
                self.state_variable_handlers.append({
                    "ct_ht" + str(i): (self._ct_ht_shared_variable_finder, self._connect_lstm_ych_to_graph, i)
                })
            logger.debug("parsing unit is %s, num layers is %d", cell_type, self.num_lstm_layers)
        if cell_match:
            self.lstm_cell_type = cell_type
            logger.debug("parsing unit is %s", cell_type)
            return cell_match
    logger.debug("cannot parse unit")
    return None
|
| 59 |
+
|
| 60 |
+
def get_weight_and_bias(self, context):
    """Collect per-layer weight/bias info for every matched LSTM layer.

    Dispatches on the cell type detected by find_cell. Returns a list with
    one entry per layer (entries may be None if extraction failed for that
    layer).
    """
    weight_and_bias = []
    for i in range(self.num_lstm_layers):
        if self.lstm_cell_type == RNNUnitType.LSTMCell:
            weight_and_bias.append(self._get_weight_and_bias_for_lstm_cell(context, i))
        elif self.lstm_cell_type == RNNUnitType.LSTMBlockCell:
            # elif: the cell type is a single value, the two branches are
            # mutually exclusive (the original's second independent `if`
            # re-tested needlessly)
            weight_and_bias.append(self._get_weight_and_bias_for_lstmblock_cell(context, i))
    return weight_and_bias
|
| 68 |
+
|
| 69 |
+
def _get_weight_and_bias_for_lstmblock_cell(self, context, i):
|
| 70 |
+
cell_match = context.cell_match[i]
|
| 71 |
+
|
| 72 |
+
w_node = cell_match.get_op("cell_kernel")
|
| 73 |
+
w = get_weights_from_const_node(self.g, w_node)
|
| 74 |
+
if w is None:
|
| 75 |
+
logger.warning("Cannot find weight, SKIP")
|
| 76 |
+
return None
|
| 77 |
+
|
| 78 |
+
b_node = cell_match.get_op("cell_bias")
|
| 79 |
+
b = get_weights_from_const_node(self.g, b_node)
|
| 80 |
+
if b is None or b.shape[0] != w.shape[1]:
|
| 81 |
+
logger.warning("cell_kernel and cell_bias's dimension doesn't match, SKIP")
|
| 82 |
+
return None
|
| 83 |
+
|
| 84 |
+
lstm_block_cell = cell_match.get_op("lstm_block_cell")
|
| 85 |
+
ft_bias_val = np.array(
|
| 86 |
+
lstm_block_cell.get_attr("forget_bias").f,
|
| 87 |
+
dtype=b.dtype
|
| 88 |
+
)
|
| 89 |
+
|
| 90 |
+
return {
|
| 91 |
+
"weight": w,
|
| 92 |
+
"bias": b,
|
| 93 |
+
"ft_bias": ft_bias_val
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
def _get_weight_and_bias_for_lstm_cell(self, context, i):
|
| 97 |
+
match = context.cell_match[i]
|
| 98 |
+
|
| 99 |
+
w_e = match.get_op("cell_kernel")
|
| 100 |
+
w = get_weights_from_const_node(self.g, w_e)
|
| 101 |
+
if w is None or w.size == 0:
|
| 102 |
+
return None
|
| 103 |
+
|
| 104 |
+
# check https://www.tensorflow.org/versions/r1.8/api_docs/cc/class/tensorflow/ops/bias-add
|
| 105 |
+
# for bias_add data format
|
| 106 |
+
bias_add = match.get_op("bias_add")
|
| 107 |
+
if bias_add is not None and bias_add.data_format != "NHWC":
|
| 108 |
+
logger.debug("BiasAdd data_format is not NHWC, SKIP")
|
| 109 |
+
return None
|
| 110 |
+
|
| 111 |
+
b_e = match.get_op("cell_bias")
|
| 112 |
+
if b_e is None:
|
| 113 |
+
b = np.array([0 for i in range(len(w[0]))]).astype(w.dtype)
|
| 114 |
+
else:
|
| 115 |
+
b = get_weights_from_const_node(self.g, b_e)
|
| 116 |
+
if b is None or b.shape[0] != w.shape[1]:
|
| 117 |
+
logger.warning("cell_kernel and cell_bias's dimensions does not match, skip")
|
| 118 |
+
return None
|
| 119 |
+
|
| 120 |
+
ft_bias_node = match.get_op("ft_bias")
|
| 121 |
+
ft_bias = get_weights_from_const_node(self.g, ft_bias_node)
|
| 122 |
+
if ft_bias is None:
|
| 123 |
+
return None
|
| 124 |
+
|
| 125 |
+
if not b.dtype == ft_bias.dtype:
|
| 126 |
+
return None
|
| 127 |
+
|
| 128 |
+
return {
|
| 129 |
+
"weight": w,
|
| 130 |
+
"bias": b,
|
| 131 |
+
"ft_bias": ft_bias
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
def parse_attributes(self, context):
|
| 135 |
+
if self.lstm_cell_type == RNNUnitType.LSTMBlockCell:
|
| 136 |
+
lstm_block_cell = context.cell_match[0].get_op("lstm_block_cell")
|
| 137 |
+
clip = float(lstm_block_cell.get_attr("cell_clip").f)
|
| 138 |
+
# current LSTM op cannot handle clip
|
| 139 |
+
if clip > 0:
|
| 140 |
+
return False
|
| 141 |
+
|
| 142 |
+
use_peephole = lstm_block_cell.get_attr_value("use_peephole")
|
| 143 |
+
if use_peephole:
|
| 144 |
+
return False
|
| 145 |
+
return True
|
| 146 |
+
|
| 147 |
+
def _ct_variable_finder(self, context, i):
|
| 148 |
+
if self.lstm_cell_type == RNNUnitType.LSTMCell:
|
| 149 |
+
lstm_cell = context.cell_match[i]
|
| 150 |
+
return self._find_state_variable_with_select(
|
| 151 |
+
context,
|
| 152 |
+
lstm_cell.get_op("ct").output[0],
|
| 153 |
+
[lstm_cell.get_op("ct_identity_consumer")]
|
| 154 |
+
)
|
| 155 |
+
if self.lstm_cell_type == RNNUnitType.LSTMBlockCell:
|
| 156 |
+
lstm_block_cell = context.cell_match[i].get_op("lstm_block_cell")
|
| 157 |
+
return self._find_state_variable_with_select(
|
| 158 |
+
context,
|
| 159 |
+
lstm_block_cell.output[1],
|
| 160 |
+
[lstm_block_cell]
|
| 161 |
+
)
|
| 162 |
+
return None
|
| 163 |
+
|
| 164 |
+
def _ht_variable_finder(self, context, i):
|
| 165 |
+
if self.lstm_cell_type == RNNUnitType.LSTMCell:
|
| 166 |
+
lstm_cell = context.cell_match[i]
|
| 167 |
+
return self._find_state_variable_with_select(
|
| 168 |
+
context,
|
| 169 |
+
lstm_cell.get_op("ht").output[0],
|
| 170 |
+
[lstm_cell.get_op("xh")]
|
| 171 |
+
)
|
| 172 |
+
if self.lstm_cell_type == RNNUnitType.LSTMBlockCell:
|
| 173 |
+
lstm_block_cell = context.cell_match[i].get_op("lstm_block_cell")
|
| 174 |
+
return self._find_state_variable_with_select(
|
| 175 |
+
context,
|
| 176 |
+
lstm_block_cell.output[6],
|
| 177 |
+
[lstm_block_cell]
|
| 178 |
+
)
|
| 179 |
+
return None
|
| 180 |
+
|
| 181 |
+
def _ct_ht_shared_variable_finder(self, context, i):
|
| 182 |
+
if self.lstm_cell_type == RNNUnitType.LSTMBlockCell:
|
| 183 |
+
return None
|
| 184 |
+
|
| 185 |
+
lstm_cell = context.cell_match[i]
|
| 186 |
+
ct = lstm_cell.get_op("ct").output[0]
|
| 187 |
+
ht = lstm_cell.get_op("ht").output[0]
|
| 188 |
+
ct_concat = [c for c in self.g.find_output_consumers(ct) if is_tf_concat_op(c)]
|
| 189 |
+
ht_concat = [c for c in self.g.find_output_consumers(ht) if is_tf_concat_op(c)]
|
| 190 |
+
if len(ct_concat) != 1 or len(ht_concat) != 1 or ct_concat[0] != ht_concat[0]:
|
| 191 |
+
logger.debug("failed to find ct-ht concat")
|
| 192 |
+
return None
|
| 193 |
+
ct_ht_shared_output = ct_concat[0].output[0]
|
| 194 |
+
|
| 195 |
+
consumers = []
|
| 196 |
+
ct_identity_consumer = lstm_cell.get_op("ct_identity_consumer")
|
| 197 |
+
ht_identity_consumer = lstm_cell.get_op("xh")
|
| 198 |
+
ct_slice = [c for c in ct_identity_consumer.inputs if is_tf_slice_op(c)]
|
| 199 |
+
ht_slice = [c for c in ht_identity_consumer.inputs if is_tf_slice_op(c)]
|
| 200 |
+
if len(ct_slice) != 1 or len(ht_slice) != 1:
|
| 201 |
+
logger.debug("failed to find slice op before identity consumers")
|
| 202 |
+
return None
|
| 203 |
+
consumers.extend([ct_slice[0], ht_slice[0]])
|
| 204 |
+
|
| 205 |
+
return self._find_state_variable_with_select(
|
| 206 |
+
context,
|
| 207 |
+
ct_ht_shared_output,
|
| 208 |
+
consumers
|
| 209 |
+
)
|
| 210 |
+
|
| 211 |
+
def is_valid(self, context):
|
| 212 |
+
# except for ct, ht or ct_ht, there are at most 2 state variables
|
| 213 |
+
if len(context.loop_properties.state_variables) - \
|
| 214 |
+
len(context.state_variables) > 2:
|
| 215 |
+
return False
|
| 216 |
+
|
| 217 |
+
# output is no more than 1
|
| 218 |
+
outputs = context.loop_properties.scan_outputs_exits
|
| 219 |
+
if len(outputs) > 1:
|
| 220 |
+
logger.debug("found %d outputs for lstm: %s", len(outputs), outputs)
|
| 221 |
+
return False
|
| 222 |
+
return True
|
| 223 |
+
|
| 224 |
+
def process_weights_and_bias_per_layer(self, context, i):
|
| 225 |
+
weights = context.weights[i]
|
| 226 |
+
w_r_icfo = weights["weight"]
|
| 227 |
+
w_dtype = weights["weight"].dtype
|
| 228 |
+
b_r_icfo = weights["bias"]
|
| 229 |
+
b_dtype = weights["bias"].dtype
|
| 230 |
+
ft_bias_scalar = weights["ft_bias"]
|
| 231 |
+
|
| 232 |
+
# split bias for each hidden unit
|
| 233 |
+
# b_r_icfo: (4 * num_units,)
|
| 234 |
+
bias_dim = b_r_icfo.shape[0]
|
| 235 |
+
hidden_size = int(bias_dim / 4)
|
| 236 |
+
b_r_icfo = np.reshape(b_r_icfo, (1, bias_dim))
|
| 237 |
+
bias_gates = np.split(b_r_icfo, 4, axis=1)
|
| 238 |
+
ft_bias = np.add(bias_gates[2], ft_bias_scalar)
|
| 239 |
+
wb_bias_iofc = np.concatenate((bias_gates[0], bias_gates[3], ft_bias, bias_gates[1]), axis=1)
|
| 240 |
+
|
| 241 |
+
# fill Rb with empty since in TF, we have only one bias.
|
| 242 |
+
rb_bias_iofc = np.zeros((1, bias_dim), dtype=b_dtype)
|
| 243 |
+
B = np.concatenate((wb_bias_iofc, rb_bias_iofc), axis=1)
|
| 244 |
+
assert B.shape == (1, 2 * bias_dim)
|
| 245 |
+
|
| 246 |
+
[wx, wh] = np.split(w_r_icfo, [-1 * hidden_size])
|
| 247 |
+
input_size = wx.shape[0]
|
| 248 |
+
assert wx.shape[0] == input_size
|
| 249 |
+
assert int(wx.shape[1] / 4) == hidden_size
|
| 250 |
+
|
| 251 |
+
# split weight for gates
|
| 252 |
+
w_gates = np.split(wx, 4, axis=1)
|
| 253 |
+
new_wx = np.concatenate((w_gates[0], w_gates[3], w_gates[2], w_gates[1]), axis=1)
|
| 254 |
+
|
| 255 |
+
h_gates = np.split(wh, 4, axis=1)
|
| 256 |
+
new_wh = np.concatenate((h_gates[0], h_gates[3], h_gates[2], h_gates[1]), axis=1)
|
| 257 |
+
W_iofc = np.transpose(new_wx)
|
| 258 |
+
R_iofc = np.transpose(new_wh)
|
| 259 |
+
|
| 260 |
+
W = np.array([W_iofc], w_dtype)
|
| 261 |
+
R = np.array([R_iofc], w_dtype)
|
| 262 |
+
|
| 263 |
+
# create node
|
| 264 |
+
w_name = utils.make_name("W" + str(i))
|
| 265 |
+
w_node = self.g.make_const(w_name, W, skip_conversion=True)
|
| 266 |
+
|
| 267 |
+
r_name = utils.make_name("R" + str(i))
|
| 268 |
+
r_node = self.g.make_const(r_name, R, skip_conversion=True)
|
| 269 |
+
|
| 270 |
+
b_name = utils.make_name("B" + str(i))
|
| 271 |
+
b_node = self.g.make_const(b_name, B, skip_conversion=True)
|
| 272 |
+
|
| 273 |
+
context.input_size[i] = input_size
|
| 274 |
+
context.hidden_size[i] = hidden_size
|
| 275 |
+
context.onnx_input_ids[i]["W"] = w_node.output[0]
|
| 276 |
+
context.onnx_input_ids[i]["R"] = r_node.output[0]
|
| 277 |
+
context.onnx_input_ids[i]["B"] = b_node.output[0]
|
| 278 |
+
|
| 279 |
+
def process_weights_and_bias(self, context):
|
| 280 |
+
for i in range(self.num_lstm_layers):
|
| 281 |
+
self.process_weights_and_bias_per_layer(context, i)
|
| 282 |
+
|
| 283 |
+
def process_var_init_nodes(self, context):
|
| 284 |
+
for i in range(self.num_lstm_layers):
|
| 285 |
+
self.process_var_init_nodes_per_layer(context, i)
|
| 286 |
+
|
| 287 |
+
def process_var_init_nodes_per_layer(self, context, i):
|
| 288 |
+
init_h_id = None
|
| 289 |
+
init_c_id = None
|
| 290 |
+
if "ct_ht" + str(i) in context.state_variables:
|
| 291 |
+
init_h_id, init_c_id = self._process_non_tuple_ch_init_nodes(context, i)
|
| 292 |
+
elif "ct" + str(i) in context.state_variables and ("ht" + str(i)) in context.state_variables:
|
| 293 |
+
init_h_id, init_c_id = self._process_tuple_ch_init_nodes(context, i)
|
| 294 |
+
else:
|
| 295 |
+
raise ValueError("no initializers, unexpected")
|
| 296 |
+
assert init_h_id and init_c_id
|
| 297 |
+
context.onnx_input_ids[i]["initial_h"] = init_h_id
|
| 298 |
+
context.onnx_input_ids[i]["initial_c"] = init_c_id
|
| 299 |
+
|
| 300 |
+
def _process_non_tuple_ch_init_nodes(self, context, i):
|
| 301 |
+
gb = GraphBuilder(self.g)
|
| 302 |
+
input_id = context.state_variables["ct_ht" + str(i)].enter_input_id
|
| 303 |
+
hidden_size = context.hidden_size[i]
|
| 304 |
+
|
| 305 |
+
attr = {"axes": [1], "starts": [0], "ends": [hidden_size]}
|
| 306 |
+
inputs_map = {"data": input_id, **attr}
|
| 307 |
+
slice_node1 = GraphBuilder(self.g).make_slice(inputs_map)
|
| 308 |
+
unsqueeze_node_1 = gb.make_unsqueeze({'data': slice_node1, "axes": [0]}, return_node=True)
|
| 309 |
+
|
| 310 |
+
attr = {"axes": [1], "starts": [hidden_size], "ends": [hidden_size * 2]}
|
| 311 |
+
inputs_map = {"data": input_id, **attr}
|
| 312 |
+
slice_node2 = GraphBuilder(self.g).make_slice(inputs_map)
|
| 313 |
+
unsqueeze_node_2 = gb.make_unsqueeze({'data': slice_node2, "axes": [0]}, return_node=True)
|
| 314 |
+
|
| 315 |
+
return unsqueeze_node_1.output[0], unsqueeze_node_2.output[0]
|
| 316 |
+
|
| 317 |
+
def _process_tuple_ch_init_nodes(self, context, i):
|
| 318 |
+
h_init_input_id = context.state_variables["ht" + str(i)].enter_input_id
|
| 319 |
+
c_init_input_id = context.state_variables["ct" + str(i)].enter_input_id
|
| 320 |
+
h_node_output = self._process_c_or_h_init_nodes(h_init_input_id, context)
|
| 321 |
+
c_node_output = self._process_c_or_h_init_nodes(c_init_input_id, context)
|
| 322 |
+
return h_node_output, c_node_output
|
| 323 |
+
|
| 324 |
+
def _process_c_or_h_init_nodes(self, initializer_input_id, context):
|
| 325 |
+
node = self.g.get_node_by_output(initializer_input_id)
|
| 326 |
+
if node.is_const():
|
| 327 |
+
val = node.get_tensor_value(as_list=False)
|
| 328 |
+
initial_name = utils.make_name("Const")
|
| 329 |
+
new_val = np.expand_dims(val, axis=0)
|
| 330 |
+
const_node = self.g.make_const(initial_name, new_val)
|
| 331 |
+
return const_node.output[0]
|
| 332 |
+
|
| 333 |
+
gb = GraphBuilder(self.g)
|
| 334 |
+
squeeze_node = gb.make_unsqueeze({'data': initializer_input_id, "axes": [0]}, return_node=True)
|
| 335 |
+
to_replace = [n for n in self.g.get_nodes() if n != squeeze_node]
|
| 336 |
+
self.g.replace_all_inputs(initializer_input_id, squeeze_node.output[0], ops=to_replace)
|
| 337 |
+
return squeeze_node.output[0]
|
| 338 |
+
|
| 339 |
+
def create_single_rnn_node(self, context, i):
|
| 340 |
+
# specify if the RNN is forward, reverse, or bidirectional.
|
| 341 |
+
# Must be one of forward (default), reverse, or bidirectional.
|
| 342 |
+
# Here we won't mark bidirectional/reverse, we will have another rewriter running
|
| 343 |
+
# after this one, which will based on patterns to combine a forward LSTM and a
|
| 344 |
+
# backward LSTM into a bidirectional one.
|
| 345 |
+
num_direction = 1
|
| 346 |
+
# todo: input_forget
|
| 347 |
+
context.attributes[i]["direction"] = "forward"
|
| 348 |
+
context.attributes[i]["hidden_size"] = context.hidden_size[i]
|
| 349 |
+
inputs = context.onnx_input_ids[i]
|
| 350 |
+
lstm_inputs = [
|
| 351 |
+
inputs["X"], inputs["W"], inputs["R"], inputs["B"],
|
| 352 |
+
inputs["sequence_lens"], inputs["initial_h"], inputs["initial_c"]]
|
| 353 |
+
|
| 354 |
+
x_shape = self.g.get_shape(lstm_inputs[0])
|
| 355 |
+
x_seq_length = x_shape[0]
|
| 356 |
+
x_batch_size = x_shape[1]
|
| 357 |
+
out_dtype = self.g.get_dtype(lstm_inputs[0])
|
| 358 |
+
|
| 359 |
+
lstm_node = self.g.make_node("LSTM", lstm_inputs, attr=context.attributes[i], output_count=3,
|
| 360 |
+
shapes=[[x_seq_length, num_direction, x_batch_size, context.hidden_size[i]],
|
| 361 |
+
[num_direction, x_batch_size, context.hidden_size[i]],
|
| 362 |
+
[num_direction, x_batch_size, context.hidden_size[i]]],
|
| 363 |
+
dtypes=[out_dtype, out_dtype, out_dtype], op_name_scope=context.rnn_scope)
|
| 364 |
+
return lstm_node
|
| 365 |
+
|
| 366 |
+
def create_rnn_node(self, context):
|
| 367 |
+
gb = GraphBuilder(self.g)
|
| 368 |
+
rnn_nodes = list()
|
| 369 |
+
outputs = context.loop_properties.scan_outputs_exits
|
| 370 |
+
logger.debug("number of rnn node outputs: %s", len(outputs))
|
| 371 |
+
|
| 372 |
+
for i in range(self.num_lstm_layers):
|
| 373 |
+
logger.debug("creating rnn node for layer: %s", i)
|
| 374 |
+
rnn_nodes.append(self.create_single_rnn_node(context, i))
|
| 375 |
+
output_id = rnn_nodes[i].output[0]
|
| 376 |
+
rnn_output_shape = self.g.get_shape(output_id)
|
| 377 |
+
squeeze_output_shape = [rnn_output_shape[0], rnn_output_shape[2], rnn_output_shape[3]]
|
| 378 |
+
squeeze_node = gb.make_squeeze({"data": output_id, "axes": [1]},
|
| 379 |
+
shapes=[squeeze_output_shape],
|
| 380 |
+
dtypes=[self.g.get_dtype(output_id)],
|
| 381 |
+
return_node=True)
|
| 382 |
+
if i + 1 < self.num_lstm_layers:
|
| 383 |
+
logger.debug("setting input for layer: %s", i + 1)
|
| 384 |
+
context.onnx_input_ids[i + 1]["X"] = squeeze_node.output[0]
|
| 385 |
+
return rnn_nodes
|
| 386 |
+
|
| 387 |
+
def _connect_lstm_yh_to_graph(self, context, i):
|
| 388 |
+
# in tf, y_h output shape is: [batch, hidden]
|
| 389 |
+
# in onnx, output shape is: [number_directions, batch, hidden]
|
| 390 |
+
gb = GraphBuilder(self.g)
|
| 391 |
+
exit_output = context.state_variables["ht" + str(i)].exit_output
|
| 392 |
+
output_id = context.rnn_node[i].output[1]
|
| 393 |
+
lstm_yh_shape = self.g.get_shape(output_id)
|
| 394 |
+
squeeze_node = gb.make_squeeze({"data": output_id, "axes": [0]},
|
| 395 |
+
shapes=[[lstm_yh_shape[1], lstm_yh_shape[2]]],
|
| 396 |
+
dtypes=[self.g.get_dtype(output_id)],
|
| 397 |
+
return_node=True)
|
| 398 |
+
|
| 399 |
+
self.g.replace_all_inputs(exit_output.id, squeeze_node.output[0]) # ops=self.g.get_nodes()
|
| 400 |
+
|
| 401 |
+
def _connect_lstm_yc_to_graph(self, context, i):
|
| 402 |
+
# in tf, y_c output shape is: [batch, hidden]
|
| 403 |
+
# in onnx, output shape is: [number_directions, batch, hidden]
|
| 404 |
+
gb = GraphBuilder(self.g)
|
| 405 |
+
exit_output = context.state_variables["ct" + str(i)].exit_output
|
| 406 |
+
output_id = context.rnn_node[i].output[2]
|
| 407 |
+
lstm_yc_shape = self.g.get_shape(output_id)
|
| 408 |
+
squeeze_node = gb.make_squeeze({"data": output_id, "axes": [0]},
|
| 409 |
+
shapes=[[lstm_yc_shape[1], lstm_yc_shape[2]]],
|
| 410 |
+
dtypes=[self.g.get_dtype(output_id)],
|
| 411 |
+
return_node=True)
|
| 412 |
+
|
| 413 |
+
self.g.replace_all_inputs(exit_output.id, squeeze_node.output[0]) # ops=self.g.get_nodes()
|
| 414 |
+
|
| 415 |
+
def _connect_lstm_ych_to_graph(self, context, i):
|
| 416 |
+
# in tf, concat of y_c and y_h output shape is: [batch, hidden *2]
|
| 417 |
+
# in onnx, y_c/y_h output shape is: [number_directions, batch, hidden]
|
| 418 |
+
gb = GraphBuilder(self.g)
|
| 419 |
+
exit_output = context.state_variables["ct_ht" + str(i)].exit_output
|
| 420 |
+
lstm_node = context.rnn_node[i]
|
| 421 |
+
yc_shape = self.g.get_shape(lstm_node.output[2])
|
| 422 |
+
concat_output_shape = [yc_shape[0], yc_shape[1], yc_shape[2] * 2]
|
| 423 |
+
concat = self.g.make_node("Concat", [lstm_node.output[2], lstm_node.output[1]],
|
| 424 |
+
attr={"axis": 2}, shapes=[concat_output_shape],
|
| 425 |
+
dtypes=[self.g.get_dtype(lstm_node.output[2])])
|
| 426 |
+
|
| 427 |
+
squeeze_output_shape = [concat_output_shape[1], concat_output_shape[2]]
|
| 428 |
+
squeeze_node = gb.make_squeeze({'data': concat.output[0], "axes": [0]},
|
| 429 |
+
shapes=[squeeze_output_shape],
|
| 430 |
+
dtypes=[self.g.get_dtype(concat.output[0])],
|
| 431 |
+
return_node=True)
|
| 432 |
+
|
| 433 |
+
self.g.replace_all_inputs(exit_output.id, squeeze_node.output[0]) # ops=self.g.get_nodes()
|
lib/python3.10/site-packages/tf2onnx/rewriter/lstm_rewriter_base.py
ADDED
|
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
# Temporary base class exclusive for LSTMs for stacked LSTM layer support.
|
| 5 |
+
# Once GRU, BiLSTM, BiGRU re-writers will also be enhanced for stacked layer support
|
| 6 |
+
# this will be combined with unit rnn base class.
|
| 7 |
+
|
| 8 |
+
"""
|
| 9 |
+
tf2onnx.rewriter.lstm_rewriter_base
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
from __future__ import division
|
| 13 |
+
from __future__ import print_function
|
| 14 |
+
import logging
|
| 15 |
+
|
| 16 |
+
from tf2onnx import utils
|
| 17 |
+
from tf2onnx.graph_builder import GraphBuilder
|
| 18 |
+
from tf2onnx.rewriter.loop_rewriter_base import LoopRewriterBase
|
| 19 |
+
from tf2onnx.rewriter.rnn_utils import get_pattern
|
| 20 |
+
from tf2onnx.graph_matcher import GraphMatcher
|
| 21 |
+
from tf2onnx.rewriter.unit_rnn_rewriter_base import UnitRnnRewriterBase, UnitRnnContext
|
| 22 |
+
|
| 23 |
+
logger = logging.getLogger(__name__)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,broad-except,protected-access,W0223
|
| 27 |
+
|
| 28 |
+
class LSTMContext(UnitRnnContext):
    """Per-rewrite context for (stacked) LSTM loops; each list holds one entry per layer."""

    def __init__(self):
        super(LSTMContext, self).__init__()
        # pattern-match results, one per matched cell/layer
        self.cell_match = []

        # parsed weight/bias dicts and derived sizes, per layer
        self.weights = []
        self.input_size = []
        self.hidden_size = []

        # onnx attributes, per layer
        self.attributes = []
        # onnx inputs per layer: [X, W, R, B, sequence_lens, initial_h, initial_c, P]
        self.onnx_input_ids = []
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class LSTMRewriterBase(UnitRnnRewriterBase):
    """
    Base class for LSTM rewriters with stacked-layer support.

    main procedures:
    1 check whether extracted loop is a unit LSTM, fall back in necessity:
        1 parse LSTM
        2 find needed info from tensorflow graph
        3 process found info according to ONNX requirement
    """

    def create_context(self):
        """Return a fresh LSTM-specific rewrite context."""
        return LSTMContext()

    def parse_unit_rnn(self, context):
        """
        parse needed info from tensorflow graph:
        1 weight
        2 state variables used in rnn unit, such as c_t, h_t
        3 sequence node
        4 input_x
        5 attributes, e.g., activation_alpha, activation_beta... optional

        Returns True when the loop is a parsable LSTM, False to fall back.
        """
        logger.debug("parse unit rnn")
        self.state_variable_handler = list()
        self.state_variable_handlers = list()

        logger.debug("match unit cell against loop body graph")
        cell_match = self.find_cell(context)
        if not cell_match:
            logger.debug('failed to match cell pattern')
            return False
        # sort layers deterministically by their kernel name
        cell_match.sort(key=lambda cmt: cmt.get_op("cell_kernel").name)
        context.cell_match = cell_match

        logger.debug("get_weight_and_bias starts")
        weights = self.get_weight_and_bias(context)
        if not weights:
            logger.debug("rnn weights check failed, SKIP")
            return False
        context.weights = weights

        if not self.get_state_variables(context):
            logger.debug("no cell variable initializers found, SKIP")
            return False

        seq_len_node = self.find_sequence_length_node(context)
        if seq_len_node:
            logger.debug("find sequence node: %s", seq_len_node.name)

        # require exact one input
        inputs = context.loop_properties.scan_inputs_initial_values
        if len(inputs) != 1:
            logger.debug("found %d inputs for the unit rnn: %s",
                         len(inputs), inputs)
            return False

        # pre-size the per-layer context slots
        for i in range(len(context.cell_match)):
            context.onnx_input_ids.append({})
            context.input_size.append(None)
            context.hidden_size.append(None)
            context.attributes.append({})
            context.onnx_input_ids[i]["sequence_lens"] = \
                seq_len_node.output[0] if seq_len_node else utils.ONNX_EMPTY_INPUT

        # the single scan input feeds only the first layer's X
        context.onnx_input_ids[0]["X"] = inputs[0]
        if not self.parse_attributes(context):
            logger.debug("wrong attributes found")
            return False

        return True

    def _match_cell(self, context, unittype):
        """match unit cell

        Tries every known pattern for *unittype* against the loop body and
        returns the list of match results, or None when nothing matches.
        """
        for cell_pattern in get_pattern(unittype):
            matcher = GraphMatcher(cell_pattern, allow_reorder=True)

            loop_props = context.loop_properties
            inputs = loop_props.state_inputs + loop_props.scan_inputs
            input_ids = [input_tensor_value_info.id for input_tensor_value_info in inputs]
            outputs = loop_props.state_outputs + loop_props.scan_outputs
            output_ids = [out_tensor_value_info.id for out_tensor_value_info in outputs]
            body_graph_ops, _, _ = LoopRewriterBase.find_subgraph(
                set(input_ids),
                set(output_ids),
                self.g, merge_as_end=True
            )

            match_results = list(matcher.match_ops(body_graph_ops))
            logger.debug("number of match results: %s", len(match_results))
            if len(match_results) > 0:
                return match_results
        return None

    def get_state_variables(self, context):
        """
        Get state variables by provided handlers. There maybe several handlers corresponding to
        different patterns of state variables.
        The common method is to find state variables from loop property according to its
        next_iteration_input and switch_true_identity_output, see lstm_rewriter_v2.

        Returns True when at least one handler resolved all of its variables.
        """
        contains_handler = False
        for handler in self.state_variable_handlers:
            can_handle = True
            for var_name, funcs in handler.items():
                # funcs is (finder, output_connector, layer_index)
                finder = funcs[0]
                state_variable = finder(context, funcs[2])
                if state_variable:
                    logger.debug("found state variable %s", var_name)
                    context.state_variables[var_name] = state_variable
                else:
                    logger.debug("failed to get state variable %s", var_name)
                    can_handle = False
                    break
            if can_handle:
                self.state_variable_handler.append(handler)
                contains_handler = True
        return contains_handler

    def process_outputs(self, context):
        """Connect every resolved state variable, then the final scan output, to the graph."""
        for handler in self.state_variable_handler:
            for var_name, funcs in handler.items():
                output_connector = funcs[1]
                output_connector(context, funcs[2])
                logger.debug("connect output of %s to graph", var_name)
        logger.debug("done handling all state variables, now focusing on final output")
        self.connect_unit_rnn_output_to_graph(context)

    def connect_unit_rnn_output_to_graph(self, context):
        """Replace the loop's scan output with the last layer's squeezed Y output."""
        outputs = context.loop_properties.scan_outputs_exits
        if not outputs:
            logger.debug("no one consume output")
            return

        gb = GraphBuilder(self.g)
        gather_output_id = outputs[0].id
        logger.debug("found output for rnn: %s", gather_output_id)

        # in tf batch major mode, output shape is : [batch, time, hidden]
        # in time major mode, output shape is: [time, batch, hidden]
        # in onnx, output shape is : [time, num_directions, batch, hidden]

        # the last stacked layer's output is the loop's overall output
        rnn_node = context.rnn_node[len(context.rnn_node) - 1]
        output_id = rnn_node.output[0]
        rnn_output_shape = self.g.get_shape(output_id)
        squeeze_output_shape = [rnn_output_shape[0], rnn_output_shape[2], rnn_output_shape[3]]
        squeeze_node = gb.make_squeeze({'data': output_id, "axes": [1]},
                                       shapes=[squeeze_output_shape],
                                       dtypes=[self.g.get_dtype(output_id)],
                                       return_node=True)
        self.g.replace_all_inputs(gather_output_id, squeeze_node.output[0])  # ops=self.g.get_nodes()
|
lib/python3.10/site-packages/tf2onnx/rewriter/quantization_ops_rewriter.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter - rewrite tensorflow QuantizeAndDequantizeV2|QuantizeAndDequantizeV3 op
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
|
| 10 |
+
from tf2onnx import utils
|
| 11 |
+
|
| 12 |
+
# pylint: disable=missing-docstring
|
| 13 |
+
|
| 14 |
+
def extract_numpy_array(node):
    """Interpret a Const node's raw tensor bytes as a 1-D float32 numpy array.

    NOTE(review): dtype is hard-wired to float32 — assumes the QDQ min/max
    inputs are always float32 tensors; confirm against the matched graphs.
    """
    raw = node.attr["value"].t.raw_data
    return np.frombuffer(raw, dtype="float32")
|
| 16 |
+
|
| 17 |
+
def create_qdq_nodes(g, match_results):
    """Replace each matched QuantizeAndDequantize node with an explicit
    QuantizeLinear + DequantizeLinear pair.

    Scales and zero points are computed from the node's min/max const inputs;
    per-channel quantization (opset >= 13) is used when the min/max tensors
    have more than one element and the node carries an `axis` attribute.

    Args:
        g: the tf2onnx Graph being rewritten (mutated in place).
        match_results: iterable of GraphMatcher results with an 'output' op.

    Returns:
        The graph's node list after the rewrite.
    """
    for match in match_results:
        qdq_node = match.get_op('output')
        qdq_node_output_dtype = g.get_dtype(qdq_node.output[0])
        qdq_node_output_shape = g.get_shape(qdq_node.output[0])

        # Get the attributes of qdq node
        narrow_range = qdq_node.attr['narrow_range'].i
        signed_input = qdq_node.attr['signed_input'].i

        min_quantized, max_quantized = [-127, 127]
        if not narrow_range and signed_input:
            min_quantized = -128

        if not signed_input:
            min_quantized, max_quantized = [0, 255]

        # Get axis attribute for per channel implementation.
        # Initialize to None so a missing attribute trips the make_sure check
        # below (clear error) instead of raising NameError.
        axis = None
        if 'axis' in qdq_node.attr:
            axis = qdq_node.attr['axis'].i

        # Get the min and max value of the inputs to QDQ op
        min_value = extract_numpy_array(qdq_node.inputs[1])
        max_value = extract_numpy_array(qdq_node.inputs[2])

        num_channels = min_value.shape[0]
        scales = np.zeros(num_channels, dtype=np.float32)
        zero_point_dtype = np.int8 if signed_input else np.uint8
        zero_point = np.zeros(num_channels, dtype=zero_point_dtype)

        for i in range(num_channels):
            # Calculate scales from the min and max values
            scale_from_min_side = min_quantized/min_value[i] if min_quantized*min_value[i] > 0 else max_quantized
            scale_from_max_side = max_quantized/max_value[i] if max_quantized*max_value[i] > 0 else max_quantized

            # keep the tighter (smaller) scale so both ends stay representable
            if scale_from_min_side < scale_from_max_side:
                scale = scale_from_min_side
            else:
                scale = scale_from_max_side

            utils.make_sure(scale > 0, "Quantize/Dequantize scale must be greater than zero")
            scales[i] = np.float32(scale)

        # Set scalars for scale and zero point for per layer quantization
        if num_channels == 1:
            scales = scales[0]
            zero_point = zero_point[0]
            attrs = {}
        else:
            # `axis is not None` (not truthiness): axis 0 is a valid axis.
            utils.make_sure(axis is not None and axis != -1,
                            "Axis must be specified for per channel quantization")
            utils.make_sure(g.opset >= 13, "Opset >= 13 is required for per channel quantization")
            attrs = {'axis': axis}

        # Split it into QuantizeLinear and DequantizeLinear and remove the QDQ node reference
        inverse_scale = (1/scales).astype(np.float32)
        y_quant_scale = g.make_const(name=utils.make_name("y_quant_scale"), np_val=inverse_scale)
        y_zero_point = g.make_const(name=utils.make_name("y_zero_point"), np_val=zero_point)
        quant_node = g.make_node(op_type="QuantizeLinear",
                                 inputs=[qdq_node.input[0], y_quant_scale.output[0],
                                         y_zero_point.output[0]],
                                 shapes=[qdq_node_output_shape],
                                 attr=attrs,
                                 dtypes=[qdq_node_output_dtype],
                                 name=utils.make_name("QuantLinearNode"))

        g.set_shape(quant_node.output[0], qdq_node_output_shape)

        g.remove_node(qdq_node.name)

        y_dequant_scale = g.make_const(name=utils.make_name("y_dequant_scale"), np_val=inverse_scale)
        y_inv_zero_point = g.make_const(name=utils.make_name("y_inv_zero_point"), np_val=zero_point)
        dequant_node = g.make_node(op_type="DequantizeLinear",
                                   inputs=[quant_node.output[0], y_dequant_scale.output[0],
                                           y_inv_zero_point.output[0]],
                                   outputs=[qdq_node.output[0]],
                                   shapes=[qdq_node_output_shape],
                                   attr=attrs,
                                   dtypes=[qdq_node_output_dtype],
                                   name=utils.make_name("DequantLinearNode"))
        g.set_shape(dequant_node.output[0], qdq_node_output_shape)

    return g.get_nodes()
|
| 100 |
+
|
| 101 |
+
def rewrite_quantize_and_dequantize(g, ops):
    """Match TF QuantizeAndDequantizeV2/V3 nodes and hand them to create_qdq_nodes.

    V2 takes (input, min, max); V3 additionally takes num_bits — only the
    first input is constrained, the remaining inputs are wildcards.
    """
    qdq_op_arities = [
        ("QuantizeAndDequantizeV3", 4),
        ("QuantizeAndDequantizeV2", 3),
    ]

    all_matches = []
    for op_type, arity in qdq_op_arities:
        wildcard_inputs = [OpTypePattern("*")] + [OpTypePattern(None) for _ in range(arity - 1)]
        pattern = OpTypePattern(op_type, name='output', inputs=wildcard_inputs)
        all_matches.extend(GraphMatcher(pattern).match_ops(ops))

    return create_qdq_nodes(g, all_matches)
|
lib/python3.10/site-packages/tf2onnx/rewriter/random_normal_rewriter.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter - rewrite tensorflow subgraph to onnx random normal op
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from tf2onnx import utils
|
| 9 |
+
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# pylint: disable=missing-docstring
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def rewrite_random_normal(g, ops):
    """Rewrite TF RandomStandardNormal subgraphs into ONNX RandomNormal(Like) nodes."""
    # pattern 1: the mean is applied via Mul/Add around the random op.
    add_pattern = \
        OpTypePattern('Add', name='output', inputs=[
            OpTypePattern('Mul', name='input2', inputs=[
                OpTypePattern('RandomStandardNormal', name='input1', inputs=["*"]), "*"
            ]), "*"
        ])

    # pattern 2: plain Identity chain, mean is implicitly 0.
    identity_pattern = \
        OpTypePattern('Identity', name='output', inputs=[
            OpTypePattern('Identity', name='input2', inputs=[
                OpTypePattern('RandomStandardNormal', name='input1', inputs=["*"])
            ])
        ])

    for candidate_pattern in (add_pattern, identity_pattern):
        matcher = GraphMatcher(candidate_pattern)
        for match in list(matcher.match_ops(ops)):
            top_node = match.get_op('output')
            if top_node.type == 'Add':
                # pattern 1: the mean is the second Add input
                mean = top_node.inputs[1].get_tensor_value()
            else:
                # pattern 2
                mean = 0.0
            dtype = g.get_dtype(top_node.output[0])
            op_name = utils.make_name("RandomNormal")
            out_name = utils.port_name(op_name)

            random_op = match.get_op('input1')
            seed = random_op.get_attr('seed2').i

            if random_op.inputs[0].type == "Shape":
                # dynamic shape: drop the Shape op and let RandomNormalLike
                # pick the shape up from the tensor itself.
                shape_source = random_op.inputs[0]
                replacement = g.make_node(
                    "RandomNormalLike", [shape_source.input[0]], outputs=[out_name], name=op_name,
                    attr={"mean": mean, "scale": 1.0, "dtype": dtype, "seed": float(seed)})
            else:
                static_shape = g.get_shape(top_node.output[0])
                replacement = g.make_node(
                    "RandomNormal", [], outputs=[out_name], name=op_name,
                    attr={"shape": static_shape, "mean": mean, "scale": 1.0,
                          "dtype": dtype, "seed": seed})

            g.replace_all_inputs(top_node.output[0], replacement.output[0], ops=ops)
            g.safe_remove_nodes(match.get_nodes())
    return ops
|
lib/python3.10/site-packages/tf2onnx/rewriter/random_uniform.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter - rewrite tensorflow subgraph to onnx random_uniform op
|
| 6 |
+
"""
|
| 7 |
+
import numpy as np
|
| 8 |
+
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
|
| 9 |
+
from tf2onnx import utils, handler
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# pylint: disable=missing-docstring
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def rewrite_random_uniform(g, ops):
    """Rewrite RandomUniform * (max - min) + min into an ONNX RandomUniform op."""
    pattern = \
        OpTypePattern('Add', name='output', inputs=[
            OpTypePattern('Mul', inputs=[
                OpTypePattern('RandomUniform', name='input1', inputs=["*"]),
                OpTypePattern('Sub', name='input2', inputs=["*", "*"]),
            ]), None
        ])

    for match in list(GraphMatcher(pattern).match_ops(ops)):
        sub_node = match.get_op('input2')
        add_node = match.get_op('output')
        uniform_node = match.get_op('input1')
        # the Sub computes max - min, so max sits on input 0
        high = sub_node.inputs[0].get_tensor_value()
        low = sub_node.inputs[1].get_tensor_value()
        doomed = list(set(match.get_nodes()))
        replacement = create_onnx_random_uniform_op(g, high, low, uniform_node, add_node, doomed)
        g.replace_all_inputs(add_node.output[0], replacement.output[0], ops=ops)
        g.safe_remove_nodes(doomed)

    return ops
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
# rewriter function when fold_const is enabled
|
| 42 |
+
# rewriter function when fold_const is enabled
def rewrite_random_uniform_fold_const(g, ops):
    """Rewrite the const-folded RandomUniform pattern, where the scale
    (max - min) and the offset (min) each appear as a single constant."""
    pattern = \
        OpTypePattern('Add', name='output', inputs=[
            OpTypePattern('Mul', name='mul', inputs=[
                OpTypePattern('RandomUniform', name='input1', inputs=["*"]),
                None,
            ]),
            None,
        ])

    for match in list(GraphMatcher(pattern).match_ops(ops)):
        add_node = match.get_op('output')
        mul_node = match.get_op('mul')
        uniform_node = match.get_op('input1')

        # the Mul constant is (max - min) and the Add constant is min
        span = mul_node.inputs[1].get_tensor_value()
        low = add_node.inputs[1].get_tensor_value()
        high = low + span
        doomed = list(set(match.get_nodes()))
        replacement = create_onnx_random_uniform_op(g, high, low, uniform_node, add_node, doomed)
        g.replace_all_inputs(add_node.output[0], replacement.output[0], ops=ops)
        g.safe_remove_nodes(doomed)

    return ops
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def create_onnx_random_uniform_op(g, tmax, tmin, ru_op, output, to_delete):
    """Create the ONNX node(s) that replace a matched TF RandomUniform subgraph.

    Args:
        g: graph being rewritten.
        tmax: upper bound of the uniform distribution.
        tmin: lower bound of the uniform distribution.
        ru_op: the original TF RandomUniform node; its input 0 carries the shape.
        output: last node of the matched subgraph — its output determines the
            dtype and shape of the replacement.
        to_delete: matched nodes scheduled for removal; mutated here when a
            node must be kept alive.

    Returns:
        The newly created RandomUniform / RandomUniformLike node.
    """
    dtype = g.get_dtype(output.output[0])
    op_name = utils.make_name("RandomUniform")
    shape_node = ru_op.inputs[0]
    shape = g.get_shape(output.output[0])
    if shape_node.is_const():
        # if the tensorflow input (aka the shape) is const we can use the RandomUniform op
        new_node = g.make_node("RandomUniform", [], name=op_name,
                               attr={"low": tmin, "high": tmax, "dtype": dtype, "shape": shape},
                               shapes=[shape], dtypes=[dtype])
    else:
        if shape_node.type == "Shape":
            # if shape is dynamic - in tensorflow shape comes as tensor VALUE,
            # in onnx RandomUniformLike takes the shape from the tensor itself.
            # In many cases there is a shape op in tensorflow before RandomUniform and
            # to make that work for onnx we just need to remove the shape op.
            new_node = g.make_node("RandomUniformLike", inputs=[shape_node.input[0]], name=op_name,
                                   attr={"low": tmin, "high": tmax, "dtype": dtype},
                                   shapes=[shape], dtypes=[dtype])
        else:
            # if the shape is calculated we need to create a tensor so RandomUniformLike
            # can take the shape from there. Pre opset9 this is somewhat hacky because there is
            # no real fill op in onnx. In general this is not going to help performance but the tensors
            # created are expected to be small.

            # tell the caller to not delete the shape node
            to_delete.remove(shape_node)
            # create a fill op with the shape of the value of the input tensor
            zero = g.make_const(utils.make_name("zero"), np.zeros((), dtype=np.float32))
            fill_node = g.make_node("Fill", inputs=[shape_node.output[0], zero.name],
                                    shapes=[shape], dtypes=[dtype])
            # run the TF "Fill" handler so the node is converted to valid ONNX in place
            func, _ = handler.tf_op.find_effective_op("Fill")
            func(g, fill_node)
            # and use RandomUniformLike to create the random tensor
            new_node = g.make_node("RandomUniformLike", inputs=[fill_node.output[0]], name=op_name,
                                   attr={"low": tmin, "high": tmax, "dtype": dtype},
                                   shapes=[shape], dtypes=[dtype])
    return new_node
|
lib/python3.10/site-packages/tf2onnx/rewriter/rnn.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter.rnn - lstm support
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
from __future__ import unicode_literals
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
|
| 14 |
+
from tf2onnx.rewriter.bilstm_rewriter import rewrite_bidirectional_lstms
|
| 15 |
+
from tf2onnx.rewriter.bigru_rewriter import rewrite_bidirectional_grus
|
| 16 |
+
from tf2onnx.rewriter.custom_rnn_rewriter import CustomRnnRewriter
|
| 17 |
+
from tf2onnx.rewriter.loop_rewriter import LoopRewriter
|
| 18 |
+
from tf2onnx.rewriter.lstm_rewriter import LSTMRewriter
|
| 19 |
+
from tf2onnx.rewriter.gru_rewriter import GRUUnitRewriter
|
| 20 |
+
|
| 21 |
+
# pylint: disable=invalid-name,unused-argument,missing-docstring
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
logger = logging.getLogger(__name__)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def rewrite_single_direction_lstm(g, ops):
    """Run the LSTM cell rewriter over graph g (`ops` is unused here)."""
    r = LSTMRewriter(g)
    return r.run()
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def rewrite_bi_direction_lstm(g, ops):
    """Merge matching forward/backward LSTM nodes into bidirectional ones."""
    return rewrite_bidirectional_lstms(g, ops)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def rewrite_single_direction_gru(g, ops):
    """Run the GRU cell rewriter over graph g (`ops` is unused here)."""
    r = GRUUnitRewriter(g)
    return r.run()
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def rewrite_bi_direction_gru(g, ops):
    """Merge matching forward/backward GRU nodes into bidirectional ones."""
    return rewrite_bidirectional_grus(g, ops)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def rewrite_custom_rnn_cell(g, ops):
    """Rewrite while-loops built from custom (non-standard) RNN cells."""
    return CustomRnnRewriter(g).run()
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def rewrite_generic_loop(g, ops):
    """Rewrite remaining generic while-loops that no RNN rewriter claimed."""
    return LoopRewriter(g).run()
|
lib/python3.10/site-packages/tf2onnx/rewriter/rnn_utils.py
ADDED
|
@@ -0,0 +1,585 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter.rnn_utils - rnn support
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import unicode_literals
|
| 9 |
+
from collections import defaultdict
|
| 10 |
+
from enum import Enum
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
import numpy as np
|
| 14 |
+
from tf2onnx import utils
|
| 15 |
+
from tf2onnx.graph_builder import GraphBuilder
|
| 16 |
+
from tf2onnx.graph_matcher import OpTypePattern # pylint: disable=unused-import
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# pylint: disable=invalid-name,unused-argument,missing-docstring
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
logger = logging.getLogger(__name__)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class REWRITER_RESULT(Enum):
    """Result states returned by the RNN rewriters."""
    SKIP = 1  # pattern did not apply
    OK = 2    # rewrite succeeded
    FAIL = 3  # rewrite was attempted but failed
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
# TensorFlow LSTMCell/BasicLSTMCell computation graph matching
|
| 33 |
+
|
| 34 |
+
xc_pattern = \
|
| 35 |
+
OpTypePattern('Split', inputs=[
|
| 36 |
+
OpTypePattern("Const"), # axis for split
|
| 37 |
+
OpTypePattern("BiasAdd", name="bias_add", inputs=[
|
| 38 |
+
OpTypePattern("MatMul", inputs=[
|
| 39 |
+
OpTypePattern("ConcatV2|Concat", name="xh"),
|
| 40 |
+
OpTypePattern("Enter", inputs=[
|
| 41 |
+
OpTypePattern("*", name="cell_kernel"),
|
| 42 |
+
]),
|
| 43 |
+
]),
|
| 44 |
+
OpTypePattern("Enter", inputs=[
|
| 45 |
+
OpTypePattern("*", name="cell_bias"),
|
| 46 |
+
]),
|
| 47 |
+
]),
|
| 48 |
+
])
|
| 49 |
+
|
| 50 |
+
lstmcell_pattern = \
|
| 51 |
+
OpTypePattern('Mul', name='ht', inputs=[
|
| 52 |
+
OpTypePattern("Sigmoid", name="ot", inputs=[xc_pattern]),
|
| 53 |
+
OpTypePattern('Tanh', inputs=[
|
| 54 |
+
OpTypePattern("Add|AddV2", name="ct", inputs=[
|
| 55 |
+
OpTypePattern("Mul", name="ct_identity_consumer", inputs=[
|
| 56 |
+
OpTypePattern("Sigmoid", name="ft", inputs=[
|
| 57 |
+
OpTypePattern("Add|AddV2", inputs=[
|
| 58 |
+
xc_pattern,
|
| 59 |
+
OpTypePattern("*", name="ft_bias"),
|
| 60 |
+
]),
|
| 61 |
+
]),
|
| 62 |
+
OpTypePattern("*"),
|
| 63 |
+
]),
|
| 64 |
+
OpTypePattern("Mul", inputs=[
|
| 65 |
+
OpTypePattern("Sigmoid", name="it", inputs=[xc_pattern]),
|
| 66 |
+
OpTypePattern("Tanh", name="gt", inputs=[xc_pattern]),
|
| 67 |
+
]),
|
| 68 |
+
]),
|
| 69 |
+
]),
|
| 70 |
+
])
|
| 71 |
+
|
| 72 |
+
xc_pattern_optimized = \
|
| 73 |
+
OpTypePattern('Split', inputs=[
|
| 74 |
+
OpTypePattern("Const"),
|
| 75 |
+
OpTypePattern("Identity", inputs=[
|
| 76 |
+
OpTypePattern("MatMul", inputs=[
|
| 77 |
+
OpTypePattern("ConcatV2|Concat", name="xh"),
|
| 78 |
+
OpTypePattern("Const", name="cell_kernel"),
|
| 79 |
+
]),
|
| 80 |
+
]),
|
| 81 |
+
])
|
| 82 |
+
|
| 83 |
+
lstmcell_pattern_optimized = \
|
| 84 |
+
OpTypePattern('Mul', name='ht', inputs=[
|
| 85 |
+
OpTypePattern("Sigmoid", name="ot", inputs=[xc_pattern_optimized]),
|
| 86 |
+
OpTypePattern('Tanh', inputs=[
|
| 87 |
+
OpTypePattern("Add|AddV2", name="ct", inputs=[
|
| 88 |
+
OpTypePattern("Mul", name="ct_identity_consumer", inputs=[
|
| 89 |
+
OpTypePattern("Sigmoid", name="ft", inputs=[
|
| 90 |
+
OpTypePattern("Add|AddV2", inputs=[
|
| 91 |
+
xc_pattern_optimized,
|
| 92 |
+
OpTypePattern("*", name="ft_bias"),
|
| 93 |
+
]),
|
| 94 |
+
]),
|
| 95 |
+
OpTypePattern("*"),
|
| 96 |
+
]),
|
| 97 |
+
OpTypePattern("Mul", inputs=[
|
| 98 |
+
OpTypePattern("Sigmoid", name="it", inputs=[xc_pattern_optimized]),
|
| 99 |
+
OpTypePattern("Tanh", name="gt", inputs=[xc_pattern_optimized]),
|
| 100 |
+
]),
|
| 101 |
+
]),
|
| 102 |
+
]),
|
| 103 |
+
])
|
| 104 |
+
|
| 105 |
+
# input sequence: top to down, left to right
|
| 106 |
+
# split into update gate and reset gate
|
| 107 |
+
gru_split_pattern = \
|
| 108 |
+
OpTypePattern("Split", inputs=[
|
| 109 |
+
OpTypePattern("Const"), # split dim, a constant
|
| 110 |
+
OpTypePattern("Sigmoid", inputs=[
|
| 111 |
+
OpTypePattern("BiasAdd", inputs=[
|
| 112 |
+
OpTypePattern("Enter", inputs=[
|
| 113 |
+
OpTypePattern("*", name="gate_bias")
|
| 114 |
+
]),
|
| 115 |
+
OpTypePattern("MatMul", name="update_reset_gate", inputs=[
|
| 116 |
+
OpTypePattern("Enter", inputs=[
|
| 117 |
+
OpTypePattern("*", name="gate_kernel")
|
| 118 |
+
]),
|
| 119 |
+
OpTypePattern("ConcatV2|Concat", name="cell_inputs")
|
| 120 |
+
])
|
| 121 |
+
])
|
| 122 |
+
])
|
| 123 |
+
])
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
grucell_pattern = \
|
| 127 |
+
OpTypePattern("Add", name="cell_output", inputs=[
|
| 128 |
+
OpTypePattern("Mul", inputs=[
|
| 129 |
+
gru_split_pattern,
|
| 130 |
+
OpTypePattern("Identity")
|
| 131 |
+
]),
|
| 132 |
+
OpTypePattern("Mul", inputs=[
|
| 133 |
+
OpTypePattern("Sub", inputs=[
|
| 134 |
+
OpTypePattern("Const"), # 1-u
|
| 135 |
+
gru_split_pattern
|
| 136 |
+
]),
|
| 137 |
+
OpTypePattern("*", name="optional_activation", inputs=[
|
| 138 |
+
OpTypePattern("BiasAdd", inputs=[
|
| 139 |
+
OpTypePattern("Enter", inputs=[
|
| 140 |
+
OpTypePattern("*", name="hidden_bias")
|
| 141 |
+
]),
|
| 142 |
+
OpTypePattern("MatMul", inputs=[
|
| 143 |
+
OpTypePattern("Enter", inputs=[
|
| 144 |
+
OpTypePattern("*", name="hidden_kernel")
|
| 145 |
+
]),
|
| 146 |
+
OpTypePattern("ConcatV2|Concat")
|
| 147 |
+
])
|
| 148 |
+
])
|
| 149 |
+
])
|
| 150 |
+
])
|
| 151 |
+
])
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
cudnn_compatible_grucell_pattern = \
|
| 155 |
+
OpTypePattern("Add", name="cell_output", inputs=[
|
| 156 |
+
OpTypePattern("Mul", inputs=[
|
| 157 |
+
OpTypePattern("Sub", inputs=[
|
| 158 |
+
OpTypePattern("Const"), # 1-u
|
| 159 |
+
gru_split_pattern
|
| 160 |
+
]),
|
| 161 |
+
OpTypePattern("*", name="optional_activation", inputs=[
|
| 162 |
+
OpTypePattern("Add", inputs=[
|
| 163 |
+
OpTypePattern("Mul", inputs=[
|
| 164 |
+
gru_split_pattern,
|
| 165 |
+
OpTypePattern("BiasAdd", inputs=[
|
| 166 |
+
OpTypePattern("Enter", inputs=[
|
| 167 |
+
OpTypePattern("*", name="hidden_state_bias")
|
| 168 |
+
]),
|
| 169 |
+
OpTypePattern("MatMul", inputs=[
|
| 170 |
+
OpTypePattern("Enter", inputs=[
|
| 171 |
+
OpTypePattern("*", name="hidden_state_kernel"),
|
| 172 |
+
]),
|
| 173 |
+
OpTypePattern("Identity")
|
| 174 |
+
])
|
| 175 |
+
])
|
| 176 |
+
]),
|
| 177 |
+
OpTypePattern("BiasAdd", inputs=[
|
| 178 |
+
OpTypePattern("Enter", inputs=[
|
| 179 |
+
OpTypePattern("*", name="hidden_input_bias")
|
| 180 |
+
]),
|
| 181 |
+
OpTypePattern("MatMul", inputs=[
|
| 182 |
+
OpTypePattern("Enter", inputs=[
|
| 183 |
+
OpTypePattern("*", name="hidden_input_kernel"),
|
| 184 |
+
]),
|
| 185 |
+
OpTypePattern("*")
|
| 186 |
+
])
|
| 187 |
+
])
|
| 188 |
+
])
|
| 189 |
+
])
|
| 190 |
+
]),
|
| 191 |
+
OpTypePattern("Mul", inputs=[
|
| 192 |
+
gru_split_pattern,
|
| 193 |
+
OpTypePattern("Identity")
|
| 194 |
+
])
|
| 195 |
+
])
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
grublockcell_pattern0 = OpTypePattern("GRUBlockCell", name="gru_block_cell", inputs=[
|
| 199 |
+
OpTypePattern("*"),
|
| 200 |
+
OpTypePattern("*"),
|
| 201 |
+
OpTypePattern("Enter", inputs=[
|
| 202 |
+
OpTypePattern("*", name="gate_kernel")
|
| 203 |
+
]),
|
| 204 |
+
OpTypePattern("Enter", inputs=[
|
| 205 |
+
OpTypePattern("*", name="hidden_kernel")
|
| 206 |
+
]),
|
| 207 |
+
OpTypePattern("Enter", inputs=[
|
| 208 |
+
OpTypePattern("*", name="gate_bias")
|
| 209 |
+
]),
|
| 210 |
+
OpTypePattern("Enter", inputs=[
|
| 211 |
+
OpTypePattern("*", name="hidden_bias")
|
| 212 |
+
])
|
| 213 |
+
])
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
grublockcell_pattern1 = OpTypePattern("GRUBlockCell", name="gru_block_cell", inputs=[
|
| 217 |
+
OpTypePattern("*"),
|
| 218 |
+
OpTypePattern("*"),
|
| 219 |
+
OpTypePattern("Const", name="gate_kernel"),
|
| 220 |
+
OpTypePattern("Const", name="hidden_kernel"),
|
| 221 |
+
OpTypePattern("Const", name="gate_bias"),
|
| 222 |
+
OpTypePattern("Const", name="hidden_bias")
|
| 223 |
+
])
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
lstmblockcell_pattern = \
|
| 227 |
+
OpTypePattern("LSTMBlockCell", name="lstm_block_cell", inputs=[
|
| 228 |
+
OpTypePattern("*"),
|
| 229 |
+
OpTypePattern("*"),
|
| 230 |
+
OpTypePattern("*"),
|
| 231 |
+
OpTypePattern("Enter", inputs=[
|
| 232 |
+
OpTypePattern("*", name="cell_kernel")
|
| 233 |
+
]),
|
| 234 |
+
OpTypePattern("*", name="Pi"),
|
| 235 |
+
OpTypePattern("*", name="Pf"),
|
| 236 |
+
OpTypePattern("*", name="Po"),
|
| 237 |
+
OpTypePattern("Enter", inputs=[
|
| 238 |
+
OpTypePattern("*", name="cell_bias")
|
| 239 |
+
])
|
| 240 |
+
])
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
seq_len_pattern0 = OpTypePattern("Select|SelectV2", inputs=[
|
| 244 |
+
OpTypePattern("GreaterEqual", inputs=[
|
| 245 |
+
OpTypePattern("*"),
|
| 246 |
+
OpTypePattern("Enter", inputs=[
|
| 247 |
+
OpTypePattern("*", name="seq_len_node")
|
| 248 |
+
])
|
| 249 |
+
]),
|
| 250 |
+
OpTypePattern("*"),
|
| 251 |
+
OpTypePattern("*")
|
| 252 |
+
])
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
seq_len_pattern1 = OpTypePattern("Select|SelectV2", inputs=[
|
| 256 |
+
OpTypePattern("GreaterEqual", inputs=[
|
| 257 |
+
OpTypePattern("*"),
|
| 258 |
+
OpTypePattern("Const", name="seq_len_node")
|
| 259 |
+
]),
|
| 260 |
+
OpTypePattern("*"),
|
| 261 |
+
OpTypePattern("*")
|
| 262 |
+
])
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
class RNNUnitType(Enum):
|
| 266 |
+
LSTMCell = 0 # TF LSTMCell and BasicLSTMCell share the same pattern
|
| 267 |
+
LSTMBlockCell = 1
|
| 268 |
+
GRUCell = 2
|
| 269 |
+
GRUBlockCell = 3
|
| 270 |
+
CudnnCompatibleGRUCell = 4
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
rnn_cell_patterns = {
|
| 274 |
+
RNNUnitType.LSTMCell: [lstmcell_pattern, lstmcell_pattern_optimized],
|
| 275 |
+
RNNUnitType.LSTMBlockCell: [lstmblockcell_pattern],
|
| 276 |
+
RNNUnitType.GRUCell: [grucell_pattern],
|
| 277 |
+
RNNUnitType.GRUBlockCell: [grublockcell_pattern0, grublockcell_pattern1],
|
| 278 |
+
RNNUnitType.CudnnCompatibleGRUCell: [cudnn_compatible_grucell_pattern]
|
| 279 |
+
}
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
def get_pattern(cell_type_name):
    """Return the list of graph patterns registered for the given RNNUnitType."""
    return rnn_cell_patterns[cell_type_name]
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
def get_rnn_scope_name(while_scope_name):
    """Derive the rnn scope by dropping the last two path components of the
    while-loop scope name; the result keeps a trailing '/'."""
    segments = while_scope_name.split('/')
    return "/".join(segments[:-2]) + "/"
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
def parse_rnn_loop(graph, loop_properties, rnn_scope, while_context_scope):
    """check if the while loop is generated by dynamic_rnn or bidirectional_rnn

    Args:
        loop_properties: LoopProperties
        rnn_scope: rnn scope name
        while_context_scope: while loop scope name

    Returns:
        (time_var, iteration_var) on success, or None if the loop does not
        look like a dynamic_rnn loop.

    check a while loop is generated by dynamic_rnn or bidirectional_rnn by

    1. some patterns in _time_step in dynamic_rnn: tensor array read, tensor array write
    2. some patterns in control_flow_ops.while_loop in dynamic_rnn:
        cond: time < loop_bound
        loop_vars: (time, output_ta, state)
        time has name called "time"
        iteration_cnt is added by control flow.

    be noted:
    1. iteration counter does not exist in tf1.4 or earlier versions
    2. if dynamic_rnn's first input is not consumed, output ta does not exist.
    """
    # names that dynamic_rnn generates inside its scope
    time_name = rnn_scope + "time"
    ta_array_name_prefix = rnn_scope + "dynamic_rnn/output_"
    iteration_counter_name = while_context_scope + "iteration_counter"

    found_time = False
    # stays None unless a tensor array is seen; set to False only when a
    # tensor array does NOT belong to this rnn's output
    is_rnn_out_ta = None
    time_var = None
    iteration_var = None
    for val in loop_properties.all_variables.values():
        enter_input_node = graph.get_node_by_output(val.enter_input_id)
        if val.is_tensor_array:
            ta_name = enter_input_node.get_attr("tensor_array_name").s.decode("utf-8")
            if not ta_name.startswith(ta_array_name_prefix):
                is_rnn_out_ta = False
        elif enter_input_node.name == time_name:
            found_time = True
            time_var = val
        elif enter_input_node.name == iteration_counter_name:
            iteration_var = val

    # a foreign tensor array, or no "time" variable, disqualifies the loop
    if not found_time or is_rnn_out_ta is False:
        logger.debug("this should not be a dynamic_rnn loop, found_time: %s, is_rnn_out_ta: %s",
                     found_time, is_rnn_out_ta)
        return None

    if not loop_properties.tensor_array_inputs:
        logger.debug("this should not be a dynamic_rnn loop, no ta input is found")
        return None

    return time_var, iteration_var
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
def get_weights_from_const_node(g, node):
    """Follow Identity chains from node and return its weights as a numpy
    array cast to the tensor's dtype, or None if no Const is found."""
    candidate = node
    # this would help ignore Identity in non-const_folded graph.
    while candidate.type == 'Identity':
        candidate = candidate.inputs[0]

    if not (candidate and candidate.type == 'Const'):
        logger.debug("weight node seems not to be Const, skip, node name is %s", candidate.name)
        return None

    weights = candidate.get_tensor_value(as_list=False)
    np_dtype = utils.map_onnx_to_numpy_type(g.get_dtype(candidate.output[0]))
    weights = weights.astype(np_dtype)
    logger.debug("found weights %s", candidate.name)
    return weights
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
######################################################
|
| 365 |
+
#### Utilities for bidirectional rnn #######
|
| 366 |
+
######################################################
|
| 367 |
+
class ONNX_RNN_TYPE(Enum):
    """ONNX RNN op kinds handled by the bidirectional rewriters."""
    GRU = 0
    LSTM = 1


# ONNX op type string for each rnn kind.
onnx_rnn_type_mapping = {
    ONNX_RNN_TYPE.GRU: "GRU",
    ONNX_RNN_TYPE.LSTM: "LSTM"
}

# Attributes that must agree between a fw and a bw rnn for them to be merged.
# Both entries are lists for consistency (the GRU entry was previously a set
# literal while LSTM was a list); lists also give deterministic iteration order.
onnx_rnn_attr_mapping = {
    ONNX_RNN_TYPE.LSTM: [
        "clip",
        "hidden_size",
        "input_forget"
    ],
    ONNX_RNN_TYPE.GRU: [
        "clip",
        "hidden_size",
        "linear_before_reset"
    ]
}

# Index of the sequence-length input on the ONNX RNN node.
onnx_rnn_seq_len_index_mapping = {
    ONNX_RNN_TYPE.LSTM: 4,
    ONNX_RNN_TYPE.GRU: 4
}
|
| 393 |
+
|
| 394 |
+
|
| 395 |
+
def find_bidirectional_rnns(g, ops, rnn_type):
    """
    Find possible bidirectional rnns, return: list of tuple,
    Format of tuple is (fw onnx rnn node, bw onnx rnn node).
    """
    # group candidate rnn nodes by the graph output that feeds them
    fw_rnns = defaultdict(list)
    bw_rnns = defaultdict(list)
    for n in g.get_nodes():
        if n.type != onnx_rnn_type_mapping[rnn_type]:
            continue

        input_id = n.input[0]
        temp = n.inputs[0]
        is_bw = False
        # look through an optional Transpose to find the true data input
        if temp.type == "Transpose":
            input_id = temp.input[0]
            temp = temp.inputs[0]

        # a Reverse feeding the rnn marks it as the backward direction
        if utils.is_tf_reverse_op(temp):
            input_id = temp.input[0]
            is_bw = True

        if is_bw:
            # if output 0 is consumed and there is no reverse after the 1st output.
            # it's not backward rnn.
            if g.find_output_consumers(n.output[0]) and not get_reverse_nodes_after_y_output(g, n):
                logger.warning("rnn %s following Reverse op isn't the part of bi-rnn.", n.name)
                continue

            logger.debug("find bw rnn %s", input_id)
            bw_rnns[input_id].append(n)
        else:
            logger.debug("find fw rnn %s", input_id)
            fw_rnns[input_id].append(n)

    # fw_rnn and bw_rnn must share the same input
    birnn_input = list(set(fw_rnns.keys()).intersection(bw_rnns.keys()))
    bi_rnns = []
    # each rnn node may participate in at most one bi-rnn pair
    matched_rnn = []
    for inp in birnn_input:
        fw_rnn = fw_rnns[inp]
        bw_rnn = bw_rnns[inp]
        # it's possible several bi-rnns share the same input
        for fw_n in fw_rnn:
            for bw_n in bw_rnn:
                if belong_to_birnn(g, fw_n, bw_n, rnn_type) and \
                        fw_n not in matched_rnn and bw_n not in matched_rnn:
                    logger.debug("found birnn comprising %s and %s", fw_n.name, bw_n.name)
                    bi_rnns.append((fw_n, bw_n))
                    matched_rnn.extend([fw_n, bw_n])
    return bi_rnns
|
| 446 |
+
|
| 447 |
+
|
| 448 |
+
def belong_to_birnn(g, fw_rnn, bw_rnn, rnn_type):
    """
    Check whether fw_rnn and bw_rnn can be merged into one bidirectional rnn.

    They qualify when every relevant (non-activation) onnx attribute matches
    and both read the same sequence-length input.
    """
    logger.debug("check whether %s and %s are part of birnn", fw_rnn.name, bw_rnn.name)

    # all attributes listed for this rnn type must agree between the two nodes
    for attr_name in onnx_rnn_attr_mapping[rnn_type]:
        fw_val = fw_rnn.get_attr_value(attr_name)
        bw_val = bw_rnn.get_attr_value(attr_name)
        if fw_val != bw_val:
            logger.debug(
                "fw_rnn and bw_rnn mismatch at attr %s: %s, %s",
                attr_name, fw_val, bw_val
            )
            return False

    # both directions must be driven by the same sequence lengths
    idx = onnx_rnn_seq_len_index_mapping[rnn_type]
    fw_seq_len, bw_seq_len = fw_rnn.input[idx], bw_rnn.input[idx]
    if utils.have_same_inference_value(g, fw_seq_len, bw_seq_len):
        return True

    logger.debug(
        "fw_rnn and bw_rnn have different seq_len input: %s, %s",
        fw_seq_len, bw_seq_len
    )
    return False
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
def get_reverse_nodes_after_y_output(g, rnn_bw):
    """Return the Reverse ops that follow the backward rnn's first (Y) output.

    Expects the chain Y -> Squeeze -> [Transpose ->] Reverse*; returns [] when
    the graph does not match that shape.
    """
    bw_consumers = g.find_output_consumers(rnn_bw.output[0])

    # todo: figure out a better way to remove reverse op
    squeeze_nodes = [c for c in bw_consumers if c.type == "Squeeze"]
    s_cnt = len(squeeze_nodes)
    if s_cnt != 1:
        logger.debug("unexpected number of squeeze following RNN 1st output:%s", s_cnt)
        return []

    trans_nodes = g.find_output_consumers(squeeze_nodes[0].output[0])
    if len(trans_nodes) != 1:
        # NOTE(review): original logs s_cnt here (not len(trans_nodes)); preserved as-is
        logger.debug("unexpected number of transpose after RNN 1st output:%s", s_cnt)
        return []

    first = trans_nodes[0]
    if first.type == "Transpose":
        reverse_nodes = g.find_output_consumers(first.output[0])
    elif utils.is_tf_reverse_op(first):
        reverse_nodes = trans_nodes
    else:
        logger.debug("not found reverse op, unexpected")
        return []

    if all(utils.is_tf_reverse_op(r_op) for r_op in reverse_nodes):
        return reverse_nodes

    logger.debug("bw y output is used followed by reverse node")
    return []
|
| 508 |
+
|
| 509 |
+
|
| 510 |
+
def get_np_val_for_const(g, node, input_index):
    """Return the raw (non-list) tensor value of node's const input at input_index."""
    const_producer = node.inputs[input_index]
    return const_producer.get_tensor_value(as_list=False)
|
| 512 |
+
|
| 513 |
+
|
| 514 |
+
def check_const(g, input_id):
    """Return (True, value) when input_id comes from a const node, else (None, None)."""
    producer = g.get_node_by_output(input_id)
    if not producer or not producer.is_const():
        return (None, None)
    return (True, producer.get_tensor_value(as_list=False))
|
| 519 |
+
|
| 520 |
+
|
| 521 |
+
def process_single_init_node(g, fw_init_input_id, bw_init_input_id, to_append):
    """Merge the fw/bw initial-state inputs into one node along axis 0.

    Folds the concat into a new constant when both sides are const; otherwise
    emits a runtime Concat. The new node is appended to to_append and returned.
    """
    fw_is_const, fw_val = check_const(g, fw_init_input_id)
    bw_is_const, bw_val = check_const(g, bw_init_input_id)
    if fw_is_const and bw_is_const:
        # both values known at conversion time: precompute the stacked constant
        merged = np.concatenate((fw_val, bw_val), axis=0)
        init_node = g.make_const(utils.make_name("initial"), merged, skip_conversion=True)
    else:
        init_node = g.make_node("Concat", [fw_init_input_id, bw_init_input_id], attr={"axis": 0})

    to_append.append(init_node)
    return init_node
|
| 533 |
+
|
| 534 |
+
|
| 535 |
+
def slice_birnn_for_original_rnn_consumers(g, rnn_fw, rnn_bw, bi_rnn, rnn_output_index, all_nodes, to_remove):
    """Reconnect consumers of the separate fw/bw rnn outputs to the merged bi-rnn.

    The bi-rnn stacks both directions on a num_directions axis (axis 1 for the
    Y output at index 0, axis 0 for the state outputs at index 1/2), so every
    original consumer is re-fed through a Slice picking its direction.

    Args:
        g: graph being rewritten.
        rnn_fw: original forward rnn node.
        rnn_bw: original backward rnn node.
        bi_rnn: new bidirectional rnn node.
        rnn_output_index: which rnn output to reconnect (0, 1 or 2).
        all_nodes: running node list; new Slice nodes are appended to it.
        to_remove: collects names of now-useless Reverse ops for later removal.

    Raises:
        ValueError: if rnn_output_index is not 0, 1 or 2.
    """
    fw_consumers = g.find_output_consumers(rnn_fw.output[rnn_output_index])
    bw_consumers = g.find_output_consumers(rnn_bw.output[rnn_output_index])
    if not fw_consumers and not bw_consumers:
        return

    if rnn_output_index == 0:
        axis = 1
        # remove reverse op for rnn_bw
        reverse_nodes = get_reverse_nodes_after_y_output(g, rnn_bw)

        for r_op in reverse_nodes:
            logger.debug("remove reverse op %s", r_op.name)
            g.replace_all_inputs(r_op.output[0], r_op.input[0], ops=all_nodes)
            to_remove.append(r_op.name)
    elif rnn_output_index in [1, 2]:
        axis = 0
    else:
        # fixed grammar of the original message "rnn only should has 3 outputs."
        raise ValueError("rnn should only have 3 outputs.")

    if fw_consumers:
        # forward direction lives at index 0 of the num_directions axis
        attr = {"axes": [axis], "starts": [0], "ends": [1]}
        inputs_map = {"data": bi_rnn.output[rnn_output_index], **attr}
        slice_node_fw = GraphBuilder(g).make_slice(inputs_map)
        all_nodes.append(g.get_node_by_output(slice_node_fw))
        g.replace_all_inputs(rnn_fw.output[rnn_output_index], slice_node_fw, ops=fw_consumers)

    if bw_consumers:
        # backward direction lives at index 1 of the num_directions axis
        attr = {"axes": [axis], "starts": [1], "ends": [2]}
        inputs_map = {"data": bi_rnn.output[rnn_output_index], **attr}
        slice_node_bw = GraphBuilder(g).make_slice(inputs_map)
        all_nodes.append(g.get_node_by_output(slice_node_bw))
        g.replace_all_inputs(rnn_bw.output[rnn_output_index], slice_node_bw, ops=bw_consumers)
|
| 568 |
+
|
| 569 |
+
|
| 570 |
+
def remove_reverse_in_bw_input(g, bw_rnn_input_x, rnn_type):
    """Drop the Reverse (and optional leading Transpose) feeding the bw rnn input
    once no rnn of this type consumes it anymore.

    Raises:
        ValueError: when an rnn of this type still consumes the input.
    """
    rnn_onnx_type = onnx_rnn_type_mapping[rnn_type]
    consumers = g.find_output_consumers(bw_rnn_input_x)
    # the transpose/reverse here must be followed by RNN if it is still useful.
    # this is guaranteed by dynamic_rnn logic.
    if any(c.type == rnn_onnx_type for c in consumers):
        raise ValueError("Reverse is still used by RNN as input, cannot remove")

    logger.debug("plan to remove useless reverse op in bw")
    reverse_node = g.get_node_by_output(bw_rnn_input_x)
    if reverse_node.type == "Transpose":
        reverse_node = reverse_node.inputs[0]

    g.replace_all_inputs(reverse_node.output[0], reverse_node.input[0])  # ops=g.get_nodes()
    g.remove_node(reverse_node.name)
|
lib/python3.10/site-packages/tf2onnx/rewriter/thresholded_relu_rewriter.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter - rewrite tensorflow subgraph to onnx ThresholdedRelu op
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
|
| 9 |
+
from tf2onnx.rewriter.leakyrelu_rewriter import _find_edge_name_between_nodes
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# pylint: disable=missing-docstring
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def rewrite_thresholded_relu(g, ops):
    """Fuse the tf pattern Mul(Cast(Greater(x, theta)), x) into one ThresholdedRelu.

    Requires opset >= 10 (where ThresholdedRelu is available); only fires when
    Greater and Mul read the same tensor.
    """
    if g.opset < 10:
        return ops

    pattern = \
        OpTypePattern('Mul', name='mul', inputs=[
            OpTypePattern('Cast', name='cast', inputs=[
                OpTypePattern('Greater', name='greater', inputs=[
                    OpTypePattern('*', name='greater_input'),
                    OpTypePattern('Const', name='theta')
                ])
            ]),
            OpTypePattern('*', name='mul_input')
        ])
    matcher = GraphMatcher(pattern, allow_reorder=True)
    # materialize matches before mutating the graph
    match_results = list(matcher.match_ops(ops))

    for match in match_results:
        greater_node = match.get_op('greater')
        mul_node = match.get_op("mul")
        cast_node = match.get_op('cast')

        greater_edge = _find_edge_name_between_nodes(match.get_op('greater_input'), greater_node)
        mul_edge = _find_edge_name_between_nodes(match.get_op('mul_input'), mul_node)
        if greater_edge != mul_edge:
            # Greater and Mul look at different tensors: not a thresholded relu
            continue

        theta = match.get_op('theta').get_tensor_value()
        thresholded_relu = g.make_node(
            "ThresholdedRelu", inputs=[mul_edge], attr={"alpha": theta},
            shapes=[g.get_shape(mul_node.output[0])],
            dtypes=[g.get_dtype(mul_node.output[0])])
        g.replace_all_inputs(mul_node.output[0], thresholded_relu.output[0], ops=ops)
        g.safe_remove_nodes([cast_node, mul_node])
    return ops
|
lib/python3.10/site-packages/tf2onnx/rewriter/transpose_rewriter.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter - rewrite tensorflow transpose op
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# pylint: disable=missing-docstring
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def rewrite_transpose(g, ops):
    """Replace a Transpose whose perm is computed via Sub/Range with a static
    reversed-axes perm attribute, then delete the now-dead perm subgraph."""
    pattern = \
        OpTypePattern('Transpose', name='output', inputs=[
            OpTypePattern(None),
            OpTypePattern('Sub', inputs=[
                OpTypePattern('Sub', inputs=["*", "*"]),
                OpTypePattern('Range', inputs=["*", "*", "*"]),
            ]),
        ])

    matcher = GraphMatcher(pattern)
    for match in list(matcher.match_ops(ops)):
        transpose = match.get_op('output')
        rank = len(g.get_shape(transpose.input[0]))
        # the matched subgraph always produces the axis-reversing permutation
        transpose.set_attr("perm", range(rank - 1, -1, -1))
        g.remove_input(transpose, transpose.input[1], 1)
        g.safe_remove_nodes([n for n in match.get_nodes() if n != transpose])
    return ops
|
lib/python3.10/site-packages/tf2onnx/rewriter/unit_rnn_rewriter_base.py
ADDED
|
@@ -0,0 +1,306 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.rewriter.unit_rnn_rewriter_base
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
import logging
|
| 11 |
+
|
| 12 |
+
from tf2onnx.rewriter.loop_rewriter_base import LoopRewriterBase, Context
|
| 13 |
+
from tf2onnx.rewriter.rnn_utils import REWRITER_RESULT, get_pattern, \
|
| 14 |
+
get_rnn_scope_name, parse_rnn_loop, seq_len_pattern0, seq_len_pattern1
|
| 15 |
+
from tf2onnx.utils import is_tf_select_op, is_tf_tensor_array_write_op
|
| 16 |
+
from tf2onnx.graph_matcher import GraphMatcher
|
| 17 |
+
from tf2onnx.graph_builder import GraphBuilder
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
logger = logging.getLogger(__name__)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,broad-except,protected-access
|
| 24 |
+
|
| 25 |
+
class UnitRnnContext(Context):
    """Per-loop parsing state accumulated while rewriting one unit rnn."""

    def __init__(self):
        super(UnitRnnContext, self).__init__()
        self.rnn_scope = None  # tf name scope of this rnn's while_loop
        self.cell_match = None  # matched cell

        self.weights = {}  # result of get_weight_and_bias (kernel/bias info)
        self.seq_len_node = None  # sequence-length node, when one exists
        self.state_variables = {}  # loop state variables (e.g. c_t, h_t) keyed by name
        self.input_size = None
        self.hidden_size = None

        self.attributes = {}  # onnx attributes
        # onnx inputs: [X, W, R, B, sequence_lens, initial_h, initial_c, P],
        # sequence_lens is optional, i.e., None
        self.onnx_input_ids = {}
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class UnitRnnRewriterBase(LoopRewriterBase):
    """Template base class turning a tf while_loop unit rnn into one ONNX rnn node.

    main procedures:
    1. extract info of while_loop based on loop_rewriter_base
    2. check whether extracted loop is a unit rnn, fall back in necessity:
       a. parse rnn scope name
       b. check if it's a dynamic_rnn
       c. find needed info from tensorflow graph
    3. process found info according to ONNX requirement

    Subclasses implement find_cell / get_weight_and_bias / process_weights_and_bias /
    process_var_init_nodes / create_rnn_node for a concrete cell type.
    """
    def __init__(self, g):
        super(UnitRnnRewriterBase, self).__init__(g)
        # {var_name: (finder, connector)}
        self.state_variable_handler = None
        self.state_variable_handlers = None

    def create_context(self):
        return UnitRnnContext()

    def run(self):
        return self.run_internal()

    def need_rewrite(self, context):
        """Decide whether the extracted while_loop is a unit rnn this rewriter handles."""
        context.rnn_scope = get_rnn_scope_name(context.while_context_scope)

        if not parse_rnn_loop(self.g, context.loop_properties, context.rnn_scope,
                              context.while_context_scope):
            logger.debug("parse_rnn_loop failed, SKIP")
            return False

        if not self.parse_unit_rnn(context):
            logger.debug("failed to parse unit rnn, SKIP")
            return False

        if not self.is_valid(context):
            logger.debug("parsed rnn is not valid, SKIP")
            return False
        return True

    def is_valid(self, context):
        # hook for subclasses; default accepts everything
        return True

    def parse_unit_rnn(self, context):
        """
        parse needed info from tensorflow graph:
            1 weight
            2 state variables used in rnn unit, such as c_t, h_t
            3 sequence node
            4 input_x
            5 attributes, e.g., activation_alpha, activation_beta... optional
        """
        logger.debug("parse unit rnn")

        logger.debug("match unit cell against loop body graph")
        cell_match = self.find_cell(context)
        if not cell_match:
            logger.debug('failed to match cell pattern')
            return False
        context.cell_match = cell_match

        logger.debug("get_weight_and_bias starts")
        weights = self.get_weight_and_bias(context)
        if not weights:
            logger.debug("rnn weights check failed, SKIP")
            return False
        context.weights = weights

        if not self.get_state_variables(context):
            logger.debug("no cell variable initializers found, SKIP")
            return False

        seq_len_node = self.find_sequence_length_node(context)
        if seq_len_node:
            logger.debug("find sequence node: %s", seq_len_node.name)
            context.onnx_input_ids["sequence_lens"] = seq_len_node.output[0]
        else:
            context.onnx_input_ids["sequence_lens"] = None

        # require exact one input
        inputs = context.loop_properties.scan_inputs_initial_values
        if len(inputs) != 1:
            logger.debug("found %d inputs for the unit rnn: %s",
                         len(inputs), inputs)
            return False
        context.onnx_input_ids["X"] = inputs[0]

        if not self.parse_attributes(context):
            logger.debug("wrong attributes found")
            return False

        return True

    def find_cell(self, context):
        # subclass must locate the cell match (see _match_cell)
        raise NotImplementedError()

    def _match_cell(self, context, unittype):
        """match unit cell"""
        for cell_pattern in get_pattern(unittype):
            matcher = GraphMatcher(cell_pattern, allow_reorder=True)

            loop_props = context.loop_properties
            inputs = loop_props.state_inputs + loop_props.scan_inputs
            input_ids = [input_tensor_value_info.id for input_tensor_value_info in inputs]
            outputs = loop_props.state_outputs + loop_props.scan_outputs
            output_ids = [out_tensor_value_info.id for out_tensor_value_info in outputs]
            body_graph_ops, _, _ = LoopRewriterBase.find_subgraph(
                set(input_ids),
                set(output_ids),
                self.g, merge_as_end=True
            )

            # accept only an unambiguous (single) match for this pattern
            match_results = list(matcher.match_ops(body_graph_ops))
            if len(match_results) == 1:
                return match_results[0]
        return None

    def get_weight_and_bias(self, context):
        raise NotImplementedError()

    def parse_attributes(self, context):
        # hook for subclasses; default accepts everything
        return True

    def rewrite(self, context):
        logger.debug("enter unit rnn rewrite function")

        logger.debug("process the weights/bias/ft_bias, to fit onnx weights/bias requirements")
        self.process_weights_and_bias(context)

        self.process_var_init_nodes(context)

        logger.debug("start to build new rnn node")

        rnn_node = self.create_rnn_node(context)
        context.rnn_node = rnn_node

        logger.debug("start to handle outputs")
        # format of ONNX output is different with tf
        self.process_outputs(context)

        logger.debug("rewrite successfully")
        return REWRITER_RESULT.OK

    def get_state_variables(self, context):
        """
        Get state variables by provided handlers. There may be several handlers
        corresponding to different patterns of state variables.
        The common method is to find state variables from loop property according to its
        next_iteration_input and switch_true_identity_output, see lstm_rewriter_v2.
        Returns True and records the first handler whose finders all succeed.
        """
        for handler in self.state_variable_handlers:
            can_handle = True
            for var_name, funcs in handler.items():
                finder = funcs[0]
                state_variable = finder(context)
                if state_variable:
                    logger.debug("found state variable %s", var_name)
                    context.state_variables[var_name] = state_variable
                else:
                    logger.debug("failed to get state variable %s", var_name)
                    can_handle = False
                    break
            if can_handle:
                self.state_variable_handler = handler
                return True
        return False

    def find_sequence_length_node(self, context):
        """Return the sequence-length node feeding the loop, or None when absent.

        Raises:
            RuntimeError: a Select is present but neither seq_len pattern matches.
        """
        # get any state variable
        state_variable = list(context.state_variables.values())[0]
        next_iter_input_node = self.g.get_node_by_output(state_variable.next_iteration_input.id)
        if not is_tf_select_op(next_iter_input_node):
            logger.debug("no sequence length node is given")
            return None
        matcher = GraphMatcher(seq_len_pattern0)
        match_result = matcher.match_op(next_iter_input_node)
        if not match_result:
            matcher = GraphMatcher(seq_len_pattern1)
            match_result = matcher.match_op(next_iter_input_node)
        if not match_result:
            raise RuntimeError("failed to find sequence length.")
        return match_result.get_op("seq_len_node")

    def process_weights_and_bias(self, context):
        raise NotImplementedError()

    def process_var_init_nodes(self, context):
        raise NotImplementedError()

    def create_rnn_node(self, context):
        raise NotImplementedError()

    def process_outputs(self, context):
        # connect state outputs via the handler that succeeded in get_state_variables
        for var_name, funcs in self.state_variable_handler.items():
            output_connector = funcs[1]
            output_connector(context)
            logger.debug("connect output of %s to graph", var_name)

        self.connect_unit_rnn_output_to_graph(context)

    def connect_unit_rnn_output_to_graph(self, context):
        """Squeeze the num_directions axis off the rnn's Y output and reconnect consumers."""
        outputs = context.loop_properties.scan_outputs_exits
        if not outputs:
            logger.debug("no one consume output")
            return

        gather_output_id = outputs[0].id
        logger.debug("found output for rnn: %s", gather_output_id)

        # in tf batch major mode, output shape is : [batch, time, hidden]
        # in time major mode, output shape is: [time, batch, hidden]
        # in onnx, output shape is : [time, num_directions, batch, hidden]

        rnn_node = context.rnn_node
        output_id = rnn_node.output[0]
        rnn_output_shape = self.g.get_shape(output_id)
        squeeze_output_shape = [rnn_output_shape[0], rnn_output_shape[2], rnn_output_shape[3]]
        gb = GraphBuilder(self.g)
        squeeze_node = gb.make_squeeze({'data': output_id, "axes": [1]},
                                       shapes=[squeeze_output_shape],
                                       dtypes=[self.g.get_dtype(output_id)],
                                       return_node=True)
        self.g.replace_all_inputs(gather_output_id, squeeze_node.output[0])  # ops=self.g.get_nodes()

    def _find_state_variable_with_select(self, context,
                                         next_iteration_input,
                                         switch_true_identity_consumers):
        """
        Find state variables from switch_true_identity_consumers to next_iteration_input.
        Select maybe added after next_iteration_input.
        Returns the single matching state variable, or None when not exactly one matches.
        """
        # find all select not followed by TensorArrayWrite
        select = []
        for c in self.g.find_output_consumers(next_iteration_input):
            if not is_tf_select_op(c):
                continue
            out_ta_writer = [
                o for o in self.g.find_output_consumers(c.output[0]) if is_tf_tensor_array_write_op(o)
            ]
            if out_ta_writer:
                continue
            select.append(c)
        if len(select) == 1:
            # a Select sits between the cell output and the loop var: look past it
            next_iteration_input = select[0].output[0]
            switch_true_identity_consumers.append(select[0])

        logger.debug(
            "try to find state variable from [%s, %s]",
            next_iteration_input,
            switch_true_identity_consumers
        )

        def checker(state_variable):
            # the variable must feed next_iteration_input and be read by every consumer
            if state_variable.next_iteration_input.id != next_iteration_input:
                return False
            for consumer in switch_true_identity_consumers:
                if state_variable.switch_true_identity_output.id not in consumer.input:
                    return False
            return True

        state_variables = context.loop_properties.get_variables(checker)
        if len(state_variables) != 1:
            logger.debug("found %d state variables", len(state_variables))
            return None
        return state_variables[0]
|
lib/python3.10/site-packages/tf2onnx/schemas.py
ADDED
|
@@ -0,0 +1,191 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.schema
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
from __future__ import unicode_literals
|
| 11 |
+
|
| 12 |
+
import logging
|
| 13 |
+
import copy
|
| 14 |
+
from collections import defaultdict, OrderedDict
|
| 15 |
+
from onnx import defs, helper, TensorProto, OperatorSetIdProto, shape_inference
|
| 16 |
+
|
| 17 |
+
from . import constants
|
| 18 |
+
from . import utils
|
| 19 |
+
|
| 20 |
+
logger = logging.getLogger(__name__)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class OnnxOpSchema(object):
    """Light-weight wrapper around an ONNX op schema (name, domain, version, attrs)."""

    def __init__(self, name, domain, since_version, attributes):
        """Create a Onnx schema

        Args:
            name (str): op name
            attributes (List[str]): valid attributes
            domain (str): default value "" means it's Onnx domain
            since_version (int): opset version, default is 1
        """
        self._name = name
        self._domain = domain
        self._attributes = attributes
        self._since_version = since_version

    @property
    def name(self):
        return self._name

    @property
    def domain(self):
        return self._domain

    @property
    def attributes(self):
        return self._attributes

    @property
    def since_version(self):
        return self._since_version

    @staticmethod
    def from_onnx_schema(onnx_schema):
        """Build an OnnxOpSchema from onnx's native OpSchema object."""
        return OnnxOpSchema(
            name=onnx_schema.name,
            domain=onnx_schema.domain,
            since_version=int(onnx_schema.since_version),
            attributes=onnx_schema.attributes,
        )

    def has_attribute(self, attr):
        """Return True when *attr* is a valid attribute of this op."""
        return attr in self.attributes
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def _register_all_schemas_with_history():
    """Collect every onnx schema into <OpName, <Domain, <SinceVersion, OpSchema>>>,
    with since_version iterating newest-first inside each (name, domain) bucket."""
    nested = defaultdict(lambda: defaultdict(dict))
    for onnx_schema in defs.get_all_schemas_with_history():
        schema = OnnxOpSchema.from_onnx_schema(onnx_schema)
        nested[schema.name][schema.domain][schema.since_version] = schema

    # re-key each bucket so iteration yields versions from high to low
    ordered_map = defaultdict(lambda: defaultdict(OrderedDict))
    for name, by_domain in nested.items():
        for domain, by_version in by_domain.items():
            ordered_map[name][domain] = OrderedDict(
                sorted(by_version.items(), key=lambda item: -item[0])
            )
    return ordered_map
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def _parse_domain_opset_versions(schemas):
|
| 85 |
+
""" Get max opset version among all schemas within each domain. """
|
| 86 |
+
domain_opset_versions = dict()
|
| 87 |
+
for domain_version_schema_map in schemas.values():
|
| 88 |
+
for domain, version_schema_map in domain_version_schema_map.items():
|
| 89 |
+
# version_schema_map is sorted by since_version in descend order
|
| 90 |
+
max_version = next(iter(version_schema_map))
|
| 91 |
+
if domain not in domain_opset_versions:
|
| 92 |
+
domain_opset_versions[domain] = int(max_version)
|
| 93 |
+
else:
|
| 94 |
+
domain_opset_versions[domain] = max(domain_opset_versions[domain], int(max_version))
|
| 95 |
+
return domain_opset_versions
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
# format is <OpName, <Domain, <SinceVersion, OpSchema>>>
# SinceVersion is sorted from high to low
_schemas = _register_all_schemas_with_history()

# per-domain max opset version supported by the installed onnx package
_domain_opset_versions = _parse_domain_opset_versions(_schemas)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def get_schema(name, max_inclusive_opset_version, domain=None):
    """Get schema by name within specific version.

    Returns the newest schema whose since_version does not exceed
    max_inclusive_opset_version, or None when no such schema exists.
    """
    version_schema_map = _schemas[name][domain or constants.ONNX_DOMAIN]
    # _schemas iterates versions from high to low, so the first hit is the newest
    return next(
        (schema for version, schema in version_schema_map.items()
         if version <= max_inclusive_opset_version),
        None,
    )
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def get_max_supported_opset_version(domain=None):
    """Get max supported opset version by current onnx package given a domain.

    Returns None when the domain is unknown to the installed onnx package.
    """
    lookup_domain = domain if domain else constants.ONNX_DOMAIN
    return _domain_opset_versions.get(lookup_domain, None)
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def infer_onnx_shape_dtype(node, opset_version, input_shapes, input_dtypes, initializers=None):
    """
    Infer shapes and dtypes for outputs of the node.
    Sometimes, shape inference needs the values of node's inputs, so initializers are used.

    Builds a throwaway single-node ONNX model around `node`, runs
    onnx.shape_inference on it, and reads the inferred output info back.
    Returns (output_shapes, output_dtypes) aligned with node.output, or
    (None, None) if ONNX shape inference raised.
    """

    def build_onnx_op(node):
        """Build onnx op"""
        onnx_node = helper.make_node(node.type, node.input, node.output, name=node.name)
        # deal with attributes
        attr = []
        attr_graphs = node.get_body_graphs()
        if attr_graphs:
            # subgraph attributes (e.g. Loop/If bodies) must be serialized as
            # graph-valued attributes; deepcopy so make_graph can't mutate the original
            for attr_name, sub_graph in attr_graphs.items():
                copied_sub_graph = copy.deepcopy(sub_graph)
                graph_proto = copied_sub_graph.make_graph("graph for " + node.name + " " + attr_name)
                attr.append(helper.make_attribute(attr_name, graph_proto))
        attr.extend(node.get_onnx_attrs().values())
        if attr:
            onnx_node.attribute.extend(attr)
        return onnx_node

    # graph inputs carry the caller-supplied shapes/dtypes; outputs are left
    # UNDEFINED/None so that shape inference fills them in
    inputs = []
    outputs = []
    for inp, shape, dtype in zip(node.input, input_shapes, input_dtypes):
        inputs.append(utils.make_onnx_inputs_outputs(inp, dtype, shape))
    for output in node.output:
        outputs.append(utils.make_onnx_inputs_outputs(output, TensorProto.UNDEFINED, None))
    graph_proto = helper.make_graph([build_onnx_op(node)], "infer-graph", inputs, outputs, initializer=initializers)
    imp = OperatorSetIdProto()
    imp.version = opset_version
    model_proto = helper.make_model(graph_proto, opset_imports=[imp])

    inferred_model = None
    try:
        inferred_model = shape_inference.infer_shapes(model_proto)
    except Exception:  # pylint: disable=broad-except
        # inference failure is non-fatal: callers treat (None, None) as "unknown"
        logger.warning(
            "ONNX Failed to infer shapes and dtypes for [%s, type: %s]",
            node.name, node.type, exc_info=1
        )
        return None, None

    # collect what inference produced, keyed by output tensor name
    shapes = {}
    dtypes = {}
    for output in inferred_model.graph.output:
        tensor_type = output.type.tensor_type
        if tensor_type.HasField("elem_type"):
            dtypes[output.name] = tensor_type.elem_type
        else:
            dtypes[output.name] = TensorProto.UNDEFINED
        # 0 in shapes of onnx means unknown which is -1 in our convertor
        if tensor_type.HasField("shape"):
            shapes[output.name] = [
                dim.dim_value if dim.dim_value != 0 else utils.ONNX_UNKNOWN_DIMENSION for dim in tensor_type.shape.dim
            ]
        else:
            shapes[output.name] = None
    # re-order into lists parallel to node.output, defaulting missing entries
    output_shapes = []
    output_dtypes = []
    for output in node.output:
        if output in shapes:
            output_shapes.append(shapes[output])
        else:
            output_shapes.append(None)
        if output in dtypes:
            output_dtypes.append(dtypes[output])
        else:
            output_dtypes.append(TensorProto.UNDEFINED)
    return output_shapes, output_dtypes
|
lib/python3.10/site-packages/tf2onnx/shape_inference.py
ADDED
|
@@ -0,0 +1,576 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.shape_inference - shape inference function for tf2onnx
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
from __future__ import unicode_literals
|
| 11 |
+
import logging
|
| 12 |
+
from distutils.version import LooseVersion
|
| 13 |
+
from collections import defaultdict
|
| 14 |
+
import numpy as np
|
| 15 |
+
from tf2onnx import utils
|
| 16 |
+
from tf2onnx.tf_utils import get_tf_tensor_shape, get_tf_const_value, get_tf_shape_attr, get_tf_version
|
| 17 |
+
from tf2onnx.tf_loader import tf_reload_graph
|
| 18 |
+
|
| 19 |
+
# pylint: disable=logging-not-lazy,missing-docstring,consider-swap-variables
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
logger = logging.getLogger(__name__)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def infer_shape(tf_graph, shape_override):
    """Infer shape for TF graph with shape_override set first.

    Entry point: applies caller-supplied shape overrides, runs the
    fixed-point inference pass, and falls back to the legacy hand-written
    inference when shapes are still missing. Returns the (possibly reloaded)
    tf_graph.
    """
    if shape_override:
        logger.info("Apply shape override:")
        for name, shape in shape_override.items():
            logger.info("\tSet %s shape to %s", name, shape)
            tf_graph.get_tensor_by_name(name).set_shape(shape)
        # reload so the overridden shapes propagate through TF's own inference
        tf_graph = tf_reload_graph(tf_graph)

    tf_graph = infer_shape_for_graph(tf_graph)

    op_outputs_with_none_shape = check_shape_for_tf_graph(tf_graph)
    if op_outputs_with_none_shape:
        # on modern TF the remaining unknowns are worth surfacing to the user
        if get_tf_version() > LooseVersion("1.5.0"):
            for op, outs in op_outputs_with_none_shape.items():
                logger.warning(
                    "Cannot infer shape for %s: %s",
                    op, ",".join(outs)
                )
        # legacy inference acts as a supplementary pass (see section marker below)
        tf_graph = infer_shape_for_graph_legacy(tf_graph)

    return tf_graph
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def check_shape_for_tf_graph(tf_graph):
    """
    Check whether TF graph misses any shape,
    and return all ops with None shape outputs for TF graph.
    """
    # FusedBatchNormV3's output index 5 is exempt: its missing shape is expected.
    skip_list = {'FusedBatchNormV3': 5}
    op_outputs_mapping_none_shape = defaultdict(list)
    for operation in tf_graph.get_operations():
        # None when the op type has no exempt output, so the comparison below
        # never matches a real index
        exempt_index = skip_list.get(operation.type)
        for idx, out in enumerate(operation.outputs):
            if idx == exempt_index:
                continue
            if get_tf_tensor_shape(out) is None:
                op_outputs_mapping_none_shape[operation.name].append(out.name)
    return op_outputs_mapping_none_shape
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def infer_shape_for_graph(tf_graph):
    """
    Infer shape for Tensorflow ops.
    Tensorflow explicitly sets shape for some ops in python code, such as Switch, Merge and TensorArrayGather.
    These shapes may be lost after freezing TF graph to graph_def without add_shapes=True.
    To bring these shapes back, we implement our own shape inference for these control flow ops based on one assumption:
    **outputs of Merge op have the same shape (at least the same rank) of its inputs**.
    With this assumption, our shape inference can handle:
    1. in tf.cond, outputs of two branches have the same rank.
    2. in tf.while_loop, loop variables don't change their rank.
    """
    while True:
        # materialize the list: every op must be visited for its set_shape
        # side effects, so no short-circuiting here
        results = [infer_shape_for_op(node) for node in tf_graph.get_operations()]
        if not any(results):
            # fixed point reached: a full pass changed nothing
            return tf_graph
        # at least one shape changed; reload so TF re-propagates shapes
        tf_graph = tf_reload_graph(tf_graph)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def infer_shape_for_op(op):
    """Fill in missing output shapes for one TF op; return True if any shape was set.

    Handles Placeholder plus the control-flow / TensorArray ops whose shapes
    TF sets in python and loses on freeze (see infer_shape_for_graph docstring).
    Returns False when nothing could be inferred for this op.
    """
    has_unknown_output_shape = any(get_tf_tensor_shape(out) is None for out in op.outputs)

    if not has_unknown_output_shape:
        return False

    if op.type == "Placeholder":
        # if placeholder shape is not found, try to get it from "shape" attribute.
        attr_shape = get_tf_shape_attr(op)
        if attr_shape is not None:
            new_shape = list(attr_shape)
            op.outputs[0].set_shape(new_shape)
            logger.debug("set placeholder op [%s] with new shape %s", op.outputs[0].name, new_shape)
            return True
        # last resort: assume scalar so conversion can proceed, but tell the user
        logger.warning("Shape of placeholder '%s' is unknown, treated it as a scalar. Please use the --inputs flag "
                       "and append the shape to the input name if this input is not a scalar.", op.name)
        op.outputs[0].set_shape([])
        return True

    if op.type == "Merge":
        s1 = get_tf_tensor_shape(op.inputs[0])
        s2 = get_tf_tensor_shape(op.inputs[1])
        new_shape = None
        if s1 is None and s2 is None:
            return False
        if s1 is None and s2 is not None:
            new_shape = s2
        if s1 is not None and s2 is None:
            new_shape = s1

        # exactly one input shape known: propagate it to the other input and the output
        # (core assumption: Merge inputs/outputs share a shape)
        if new_shape is not None:
            op.inputs[0].set_shape(new_shape)
            op.inputs[1].set_shape(new_shape)
            op.outputs[0].set_shape(new_shape)
            logger.debug("set [%s] with new shape %s", op.outputs[0].name, new_shape)
            return True

        # inputs' shapes both exist
        if s1 != s2:
            if len(s1) != len(s2):
                # rank mismatch breaks the Merge assumption -- give up on this op
                logger.warning("Shapes of Merge %s have different ranks: %s, %s", op.name, len(s1), len(s2))
                return False

            logger.debug("Inputs of Merge %s have different shapes: %s, %s, but the same rank", op.name, s1, s2)
            new_shape = _merge_shapes_for_tf(s1, s2)
            op.outputs[0].set_shape(new_shape)
            logger.debug("set [%s] with new shape %s", op.outputs[0].name, new_shape)
        else:
            new_shape = s1
            op.outputs[0].set_shape(new_shape)
            logger.debug("set [%s] with new shape %s", op.outputs[0].name, new_shape)

        return True

    if op.type == "Switch":
        # both Switch branches carry the same tensor, hence the same shape
        new_shape = get_tf_tensor_shape(op.inputs[0])
        if new_shape is not None:
            op.outputs[0].set_shape(new_shape)
            op.outputs[1].set_shape(new_shape)
            logger.debug("set [%s] with new shape %s", op.outputs[0].name, new_shape)
            logger.debug("set [%s] with new shape %s", op.outputs[1].name, new_shape)
            return True
        return False

    if op.type == "Enter":
        # Enter forwards its input into a loop frame unchanged
        new_shape = get_tf_tensor_shape(op.inputs[0])
        if new_shape is not None:
            op.outputs[0].set_shape(new_shape)
            logger.debug("set [%s] with new shape %s", op.outputs[0].name, new_shape)
            return True
        return False

    if op.type == "TensorArrayGatherV3":
        # TensorArrayGatherV3's output: all of the elem in the TensorArray,
        # concatenated along a new axis (the new dimension 0), so shape of TensorArray should be found first.
        # And TensorArrayWrite will write elem to TensorArray, so shape of TensorArray can be got from TensorArrayWrite
        # so the process is: first find TensorArrayWrite and then get TensorArray's shape,
        # and finally add one dim to the shape is shape of TensorArrayGather

        handle_op = op.inputs[0].op
        if handle_op.type != "TensorArrayV3":
            return False

        # find TensorArrayWrite
        tensor_array_write_op = _find_tensorarray_write(handle_op)
        if not tensor_array_write_op:
            return False
        # get TensorArray shape from input tensor of the found TensorArrayWrite op
        shape = get_tf_tensor_shape(tensor_array_write_op.inputs[2])
        # update TensorArray's shape info
        if shape is not None:
            # leading None is the gathered axis whose length is unknown statically
            new_shape = [None] + shape
            op.outputs[0].set_shape(new_shape)
            logger.debug("set [%s] with new shape %s", op.outputs[0].name, new_shape)
            return True
        return False

    if op.type == "TensorArrayReadV3":
        # TensorArrayRead reads an element from the TensorArray into output value.
        # The TensorArray's shape can be got from TensorArrayScatter.
        # So the process is: first find TensorArrayScatter's shape and then TensorArray's
        # and finally take its last n-1 dim.
        flow_in_op = op.inputs[2].op
        if flow_in_op.type != "Enter":
            return False

        scatter_op = flow_in_op.inputs[0].op
        if scatter_op.type != "TensorArrayScatterV3":
            return False

        value_shape_before_scatter = get_tf_tensor_shape(scatter_op.inputs[2])
        if value_shape_before_scatter is None:
            return False

        # drop the scattered axis (dim 0) to get one element's shape
        new_shape = value_shape_before_scatter[1:]
        if new_shape is not None:
            op.outputs[0].set_shape(new_shape)
            logger.debug("set [%s] with new shape %s", op.outputs[0].name, new_shape)
            return True
        return False

    return False
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
def _find_tensorarray_write(op):
    """Return the first TensorArrayWriteV3 reached from this TensorArrayV3's
    handle through an Enter op, or None if no such writer exists."""
    utils.make_sure(op.type == "TensorArrayV3", "op should be tensorarray")

    for handle_consumer in op.outputs[0].consumers():
        if handle_consumer.type != "Enter":
            continue
        # the handle enters a loop frame; look at who consumes it inside
        for frame_consumer in handle_consumer.outputs[0].consumers():
            if frame_consumer.type == "TensorArrayWriteV3":
                return frame_consumer
    return None
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
def _merge_shapes_for_tf(shape1, shape2):
    """
    Merge 2 shapes, return merged shape, set unknown for dims with different values.
    Raise exception for mismatch.

    Dims that agree (or where only one side is known) are kept; only genuinely
    conflicting dims become None (TF's "unknown").
    """
    if shape1 is None:
        return shape2
    if shape2 is None:
        return shape1

    utils.make_sure(utils.is_list_or_tuple(shape1), "invalid type for shape1")
    utils.make_sure(utils.is_list_or_tuple(shape2), "invalid type for shape2")
    utils.make_sure(len(shape1) == len(shape2), "shapes rank mismatch: shape1=%s, shape2=%s", shape1, shape2)

    merged = []
    for d1, d2 in zip(shape1, shape2):
        if d1 is None:
            merged.append(d2)
        elif d2 is None or d1 == d2:
            # BUGFIX: previously any pair of known dims was collapsed to None,
            # even when equal, losing valid shape information; keep equal dims.
            merged.append(d1)
        else:
            # known but conflicting dims: None means unknown in tensorflow
            merged.append(None)
    return merged
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
######################################################################
|
| 253 |
+
#### Below is our old tf shape inference as a supplementary ####
|
| 254 |
+
#### and a subtitute for TF 1.5.0 ####
|
| 255 |
+
######################################################################
|
| 256 |
+
|
| 257 |
+
# Op types whose output shape equals their (first) input's shape verbatim,
# so the input shape can be copied straight through (see set_shape_from_input).
direct_ops = [
    "Cast",
    "Exit",
    "Floor",
    "Identity",
    "LogicalNot",
    "ReverseSequence",
    "Relu6",
    "Sigmoid",
    "Square",
    "Tanh"
]
# Elementwise binary op types whose output shape is the broadcast of the two
# input shapes (see set_shape_from_inputs_broadcast).
broadcast_ops = [
    "Add",
    "Greater",
    "GreaterEqual",
    "Less",
    "LessEqual",
    "LogicalAnd",
    "LogicalOr",
    "Mul",
    "RealDiv",
    "Sub"
]
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def infer_shape_for_graph_legacy(tf_graph):
    """Run the legacy per-op inference over the graph until a full pass
    changes nothing, then return the graph (no reload is performed here)."""
    changed = True
    while changed:
        # materialize the list so every op is visited for its side effects
        changed = any([infer_shape_for_op_legacy(node) for node in tf_graph.get_operations()])
    return tf_graph
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
def infer_shape_for_op_legacy(op):
    """Legacy hand-written shape inference for one op; return True if any
    shape (input or output) was updated, False otherwise."""
    # invoke tf shape inference first
    infer_shape_for_op(op)

    has_unknown_input_shape = any(get_tf_tensor_shape(inp) is None for inp in op.inputs)
    has_unknown_output_shape = any(get_tf_tensor_shape(out) is None for out in op.outputs)

    # an input shape may be inferred from op output or other input shapes
    # try to infer it first
    if has_unknown_input_shape:
        if infer_input_shapes(op):
            return True

    if not has_unknown_output_shape:
        return False

    # for those ops, we don't expect all input shapes available to infer output shapes.
    # (returns None when the op type is not handled there)
    ret = infer_output_shapes_with_partial_inputs(op)
    if ret is not None:
        return ret

    # for ops, we need all input shapes ready to infer output shapes.
    are_all_input_shape_ready = True
    no_shape = []
    for i in op.inputs:
        if get_tf_tensor_shape(i) is None:
            are_all_input_shape_ready = False
            no_shape.append(i.name)

    if not are_all_input_shape_ready:
        logger.debug("op %s has inputs don't have shape specified, they are: %s", op.name, no_shape)
        return False

    if op.type in direct_ops:
        # output shape equals input shape for these ops
        return set_shape_from_input(op.inputs[0], op.outputs[0])

    if op.type in broadcast_ops:
        # elementwise with broadcasting
        return set_shape_from_inputs_broadcast(op.inputs, op.outputs[0])

    if op.type == "RandomUniform":
        # pattern: RandomUniform(Shape(x)) has the same shape as x
        shape_op = op.inputs[0].op
        if not shape_op or shape_op.type != "Shape":
            return False
        return set_shape_from_input(shape_op.inputs[0], op.outputs[0])

    if op.type == "Gather":
        # uses the following link to know how to infer shape of output
        # https://www.tensorflow.org/api_docs/python/tf/gather
        shape_params = get_tf_tensor_shape(op.inputs[0])
        shape_indices = get_tf_tensor_shape(op.inputs[1])
        # gather can only have 2 inputs
        # https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/gather.html
        if len(op.inputs) == 3:
            axis_op = op.inputs[2].op
            if not utils.is_tf_const_op(axis_op):
                return False
            axis = get_tf_const_value(axis_op)
        else:
            axis = 0

        # params.shape[:axis] + indices.shape + params.shape[axis+1:]
        shape = shape_params[:axis] + shape_indices + shape_params[axis + 1:]
        op.outputs[0].set_shape(shape)
        return True

    if op.type in ["All", "Any", "Max", "Min"]:
        # reductions: reduced axes drop (or become 1 with keep_dims)
        axis_op = op.inputs[1].op
        if not utils.is_tf_const_op(axis_op):
            return False
        axis = get_tf_const_value(axis_op)
        if not isinstance(axis, list):
            axis = [axis]
        keep_dims = op.get_attr("keep_dims")
        shape = get_tf_tensor_shape(op.inputs[0])
        # normalize negative axes
        for i, _ in enumerate(axis):
            if axis[i] < 0:
                axis[i] += len(shape)

        new_shape = []
        for i, _ in enumerate(shape):
            if i in axis:
                if keep_dims:
                    new_shape.append(1)
            else:
                new_shape.append(shape[i])

        op.outputs[0].set_shape(new_shape)
        logger.debug("set %s op [%s] with new shape %s", op.type, op.outputs[0].name, new_shape)
        return True

    if op.type == "ExpandDims":
        # https://www.tensorflow.org/api_docs/python/tf/expand_dims
        input_shape = get_tf_tensor_shape(op.inputs[0])
        dim_op = op.inputs[1].op
        if input_shape is None or not utils.is_tf_const_op(dim_op):
            return False

        dim = get_tf_const_value(dim_op)
        if dim < 0:
            # negative dim counts from the end of the EXPANDED shape, hence +1
            dim = dim + len(input_shape) + 1

        new_shape = input_shape[:dim] + [1] + input_shape[dim:]
        op.outputs[0].set_shape(new_shape)
        logger.debug("set [%s] with new shape %s", op.outputs[0].name, new_shape)
        return True

    if op.type == "Unpack":
        input_shape = get_tf_tensor_shape(op.inputs[0])
        if input_shape is None:
            return False

        axis = op.get_attr("axis")
        axis = axis if axis >= 0 else axis + len(input_shape)
        # the link below says that the rank of output is "rank(input) -1",
        # from this statement "num" must equal to input_shape[axis], and if not tf will throw a runtime error
        # https://www.tensorflow.org/api_docs/python/tf/unstack
        new_shape = input_shape[:axis] + input_shape[axis + 1:]
        for output in op.outputs:
            output.set_shape(new_shape)
            logger.debug("set %s op [%s] with new shape %s", op.type, output.name, new_shape)
        return True

    if op.type in ["Minimum", "Maximum"]:
        # ops that are elementwise and support broadcasting
        input_shapes = [get_tf_tensor_shape(op) for op in op.inputs]
        new_shape = broadcast_shape_inference(*input_shapes)
        op.outputs[0].set_shape(new_shape)
        return True

    return False
|
| 424 |
+
|
| 425 |
+
|
| 426 |
+
def infer_input_shapes(op):
    """For Select/SelectV2, propagate the known shape of one value branch
    to the other when exactly one is missing; return True if shapes were set."""
    if op.type in ["Select", "SelectV2"]:
        shape_then = get_tf_tensor_shape(op.inputs[1])
        shape_else = get_tf_tensor_shape(op.inputs[2])
        # copy shape if t OR e does not have a shape, no update if t AND e both have shapes
        if shape_then is None or shape_else is None:
            # NOTE: `or` keeps the original truthiness semantics -- an empty
            # (scalar) shape on the known side is treated as nothing to copy
            candidate = shape_then or shape_else
            if candidate is not None:
                op.inputs[1].set_shape(candidate)
                op.inputs[2].set_shape(candidate)
                logger.debug("set [%s, %s] with new shape %s", op.inputs[1].name, op.inputs[2].name, candidate)
                return True
        return False
|
| 439 |
+
|
| 440 |
+
|
| 441 |
+
def infer_output_shapes_with_partial_inputs(op):
    """Infer output shapes for op types that don't need every input shape.

    Tri-state return: True (shape set), False (handled but not inferable),
    or None (op type not handled here -- caller should fall through to the
    full-input inference path).
    """
    # output shape of concat op: only the dim val of concatenated dim will be changed
    # so only partial(at least one) input shapes need to be known to infer output shape of concat op
    if utils.is_tf_concat_op(op):
        data_inputs = op.inputs[:-1]
        input_shapes = [get_tf_tensor_shape(inp) for inp in data_inputs]
        input_shapes = [shape for shape in input_shapes if shape is not None]
        if not input_shapes:
            logger.debug("all input shapes of concat op %s are None, can't infer its output shape", op.name)
            return False

        new_shape = input_shapes[0]
        axis_op = op.inputs[-1]
        rank = len(new_shape)
        if not utils.is_tf_const_op(axis_op):
            # axis unknown: only the rank is trustworthy, mark every dim unknown
            op.outputs[0].set_shape([-1] * rank)
            return True

        axis = get_tf_const_value(axis_op)
        axis = axis if axis >= 0 else axis + rank
        new_shape[axis] = -1
        if len(input_shapes) == len(data_inputs):  # all input shapes are known
            # assumes unknown dims are encoded as -1 in these shapes -- TODO confirm
            concat_dim_vals = list(np.array(input_shapes)[:, axis])
            # only when inputs' shape are known, then val of concat dim can be calculated
            if concat_dim_vals.count(-1) == 0:
                new_shape[axis] = sum(concat_dim_vals)

        op.outputs[0].set_shape(new_shape)
        logger.debug("set Concat op [%s] with new shape %s", op.outputs[0].name, new_shape)
        return True

    if op.type in ["Select", "SelectV2"]:
        # output (and both value inputs) share the shape of whichever branch is known
        new_shape = get_tf_tensor_shape(op.inputs[1])
        if new_shape is None:
            new_shape = get_tf_tensor_shape(op.inputs[2])
        if new_shape is not None:
            op.outputs[0].set_shape(new_shape)
            op.inputs[1].set_shape(new_shape)
            op.inputs[2].set_shape(new_shape)
            logger.debug("set [%s] with new shape %s", op.outputs[0].name, new_shape)
            return True
        return False

    if op.type == "Pack":
        # Pack stacks N same-shaped inputs along a new axis; one known input
        # shape is enough for all of them
        axis = op.get_attr("axis")
        input_shape = None
        for i in op.inputs:
            s = get_tf_tensor_shape(i)
            if s is not None:
                input_shape = s
                break
        if input_shape is None:
            return False
        if axis < 0:
            axis += len(input_shape)
        # backfill the shape on inputs that were missing it
        for i in op.inputs:
            if not get_tf_tensor_shape(i):
                i.set_shape(input_shape)
                logger.debug("set [%s] with new shape %s", i.name, input_shape)
        # inserted axis has length == number of packed inputs
        new_shape = input_shape[:axis] + [len(op.inputs)] + input_shape[axis:]
        op.outputs[0].set_shape(new_shape)
        logger.debug("set Pack op [%s] with new shape %s", op.outputs[0].name, new_shape)
        return True

    if op.type == "Pow":
        # https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/pow
        new_shape = get_tf_tensor_shape(op.inputs[0])
        if new_shape is None:
            new_shape = get_tf_tensor_shape(op.inputs[1])
        if new_shape is not None:
            op.outputs[0].set_shape(new_shape)
            logger.debug("set [%s] with new shape %s", op.outputs[0].name, new_shape)
            return True
        return False

    return None
|
| 517 |
+
|
| 518 |
+
|
| 519 |
+
def set_shape_from_input(input_tensor, output_tensor):
    """Copy input_tensor's known shape onto output_tensor.

    Returns True when a shape was applied, False when the input shape is
    still unknown.
    """
    shape = get_tf_tensor_shape(input_tensor)
    if shape is None:
        return False
    output_tensor.set_shape(shape)
    logger.debug("set [%s] with new shape %s", output_tensor.name, shape)
    return True
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
def set_shape_from_inputs_broadcast(input_tensors, output_tensor):
    """Set output_tensor's shape to the broadcast of the first two input
    tensors' shapes; return True if a shape could be determined."""
    lhs = get_tf_tensor_shape(input_tensors[0])
    rhs = get_tf_tensor_shape(input_tensors[1])
    merged = broadcast_shape_inference(lhs, rhs)
    if merged is None:
        # shapes are incompatible or both unknown
        return False
    output_tensor.set_shape(merged)
    logger.debug("set [%s] with new shape %s", output_tensor.name, merged)
    return True
|
| 537 |
+
|
| 538 |
+
|
| 539 |
+
def broadcast_shape_inference(shape_0, shape_1):
    """Return the broadcast shape of two shape lists (numpy-style rules).

    -1 means "unknown" and is compatible with anything (the known dim wins).
    Returns the other shape when one is None, and None when the shapes
    cannot be broadcast together.
    """
    if shape_0 is None:
        return shape_1
    if shape_1 is None:
        return shape_0

    # two dimensions are compatible when they are equal, or one of them is 1
    # compare from last dim
    if len(shape_0) > len(shape_1):
        shape_0, shape_1 = shape_1, shape_0

    # BUGFIX: copy, so the caller's shape list is never mutated in place
    new_shape = list(shape_1)
    rank_0 = len(shape_0)
    if rank_0 == 0:
        return new_shape

    # BUGFIX: dims must be aligned from the LAST axis. The previous code
    # compared index i of the shorter shape with index i of the longer one,
    # i.e. trailing dims against leading dims when ranks differ.
    offset = len(shape_1) - rank_0
    for i in range(rank_0 - 1, -1, -1):
        d0 = shape_0[i]
        d1 = new_shape[i + offset]
        if d0 == d1 or d0 == 1 or d0 == -1:
            # compatible; keep the dim already in new_shape
            continue
        if d1 == 1 or d1 == -1:
            # the other side is broadcastable/unknown; take the known dim
            new_shape[i + offset] = d0
        else:
            logger.warning("two shapes not possible to broadcast, %s, %s", shape_0, shape_1)
            return None
    return new_shape
|
lib/python3.10/site-packages/tf2onnx/tf_loader.py
ADDED
|
@@ -0,0 +1,639 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""Methods to load tensorflow graph from graphdef, checkpoint or saved_model."""
|
| 5 |
+
|
| 6 |
+
from __future__ import absolute_import
|
| 7 |
+
from __future__ import division
|
| 8 |
+
from __future__ import print_function
|
| 9 |
+
from __future__ import unicode_literals
|
| 10 |
+
|
| 11 |
+
import logging
|
| 12 |
+
from distutils.version import LooseVersion
|
| 13 |
+
|
| 14 |
+
import tensorflow as tf
|
| 15 |
+
import numpy as np
|
| 16 |
+
from tensorflow.python.ops import lookup_ops
|
| 17 |
+
|
| 18 |
+
from tf2onnx import utils
|
| 19 |
+
from tf2onnx.tf_utils import get_tf_version, tflist_to_onnx, get_hash_table_info, replace_placeholders_with_tables
|
| 20 |
+
|
| 21 |
+
logger = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# pylint: disable=unused-argument,unused-import,no-value-for-parameter,unexpected-keyword-arg,ungrouped-imports
|
| 25 |
+
# pylint: disable=missing-function-docstring,import-outside-toplevel,useless-import-alias,missing-docstring
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def is_tf2():
    """Return True when the installed tensorflow is a 2.x release."""
    return tf.__version__[:2] == "2."
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def _not_implemented_tf_placeholder(name):
|
| 33 |
+
"""Creates a placeholder function for missing Tensorflow imports"""
|
| 34 |
+
|
| 35 |
+
def not_implemented_tf_placeholder(*args, **kwargs):
|
| 36 |
+
raise NotImplementedError(
|
| 37 |
+
f'Tensorflow verison {tf.__version__} does not implement '
|
| 38 |
+
f'`{name}`, try converting your model with a different version.'
|
| 39 |
+
)
|
| 40 |
+
|
| 41 |
+
return not_implemented_tf_placeholder
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
try:
|
| 45 |
+
from tensorflow.python.framework.function_def_to_graph import function_def_to_graph
|
| 46 |
+
except ImportError:
|
| 47 |
+
function_def_to_graph = _not_implemented_tf_placeholder('function_def_to_graph')
|
| 48 |
+
|
| 49 |
+
try:
|
| 50 |
+
# pylint: disable=protected-access
|
| 51 |
+
from tensorflow.python.saved_model.load import _RestoredResource as TfRestoredResourceType
|
| 52 |
+
except ImportError:
|
| 53 |
+
TfRestoredResourceType = tuple() # isinstance(x, tuple()) is always false
|
| 54 |
+
|
| 55 |
+
try:
|
| 56 |
+
from tensorflow.python.training.tracking.tracking import AutoTrackable as TfAutoTrackableType
|
| 57 |
+
except ImportError:
|
| 58 |
+
TfAutoTrackableType = tuple()
|
| 59 |
+
|
| 60 |
+
if is_tf2():
|
| 61 |
+
convert_variables_to_constants = tf.compat.v1.graph_util.convert_variables_to_constants
|
| 62 |
+
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
|
| 63 |
+
else:
|
| 64 |
+
from tensorflow.python.framework.graph_util import convert_variables_to_constants
|
| 65 |
+
|
| 66 |
+
convert_variables_to_constants_v2 = _not_implemented_tf_placeholder('convert_variables_to_constants_v2')
|
| 67 |
+
|
| 68 |
+
if is_tf2():
|
| 69 |
+
tf_reset_default_graph = tf.compat.v1.reset_default_graph
|
| 70 |
+
tf_global_variables = tf.compat.v1.global_variables
|
| 71 |
+
tf_session = tf.compat.v1.Session # pylint: disable=invalid-name
|
| 72 |
+
tf_graphdef = tf.compat.v1.GraphDef
|
| 73 |
+
tf_import_meta_graph = tf.compat.v1.train.import_meta_graph
|
| 74 |
+
tf_gfile = tf.io.gfile
|
| 75 |
+
tf_placeholder = tf.compat.v1.placeholder
|
| 76 |
+
tf_placeholder_with_default = tf.compat.v1.placeholder_with_default
|
| 77 |
+
extract_sub_graph = tf.compat.v1.graph_util.extract_sub_graph
|
| 78 |
+
elif LooseVersion(tf.__version__) >= "1.13":
|
| 79 |
+
# 1.13 introduced the compat namespace
|
| 80 |
+
tf_reset_default_graph = tf.compat.v1.reset_default_graph
|
| 81 |
+
tf_global_variables = tf.compat.v1.global_variables
|
| 82 |
+
tf_session = tf.compat.v1.Session # pylint: disable=invalid-name
|
| 83 |
+
tf_graphdef = tf.compat.v1.GraphDef
|
| 84 |
+
tf_import_meta_graph = tf.compat.v1.train.import_meta_graph
|
| 85 |
+
tf_gfile = tf.gfile
|
| 86 |
+
tf_placeholder = tf.compat.v1.placeholder
|
| 87 |
+
tf_placeholder_with_default = tf.compat.v1.placeholder_with_default
|
| 88 |
+
extract_sub_graph = tf.compat.v1.graph_util.extract_sub_graph
|
| 89 |
+
else:
|
| 90 |
+
# older than 1.13
|
| 91 |
+
tf_reset_default_graph = tf.reset_default_graph
|
| 92 |
+
tf_global_variables = tf.global_variables
|
| 93 |
+
tf_session = tf.Session # pylint: disable=invalid-name
|
| 94 |
+
tf_graphdef = tf.GraphDef
|
| 95 |
+
tf_import_meta_graph = tf.train.import_meta_graph
|
| 96 |
+
tf_gfile = tf.gfile
|
| 97 |
+
tf_placeholder = tf.placeholder
|
| 98 |
+
tf_placeholder_with_default = tf.placeholder_with_default
|
| 99 |
+
extract_sub_graph = tf.graph_util.extract_sub_graph
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def inputs_without_resource(sess, input_names):
    """Filter input_names down to tensors whose dtype is not `resource`.

    Best-effort: if any name cannot be resolved in sess.graph, the original
    list is returned unchanged.
    """
    try:
        input_names = [
            n for n in input_names
            if sess.graph.get_tensor_by_name(n).dtype != tf.dtypes.resource
        ]
    except Exception:  # pylint: disable=broad-except
        # Deliberately best-effort. Was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; Exception is the correct breadth here.
        pass
    return input_names
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def convert_variables_to_constants_large_model(func):
    """Freeze a ConcreteFunction's variables without serializing the whole graph.

    For large (>2GB) models the public freezing API fails on protobuf size
    limits, so this reaches into TF's internal convert_to_constants helpers.
    The internal API changed after TF 2.2, hence the version branch below.
    Returns a frozen GraphDef.
    """
    # For large models we use internal tf methods as a hack

    if tf.__version__.startswith("2.2."):
        try:
            from tensorflow.python.framework.convert_to_constants import \
                _convert_variables_to_constants_v2_impl  # pylint: disable=protected-access
        except ImportError:
            # raises NotImplementedError with a helpful message
            _not_implemented_tf_placeholder("_convert_variables_to_constants_v2_impl")()
        frozen_graph_def, _ = \
            _convert_variables_to_constants_v2_impl(func, lower_control_flow=False, aggressive_inlining=True)
        return frozen_graph_def

    # TF >= 2.3 path: the impl was refactored into a converter-data object.
    try:
        from tensorflow.python.framework.convert_to_constants import \
            _FunctionConverterData, _replace_variables_by_constants  # pylint: disable=protected-access
    except ImportError:
        _not_implemented_tf_placeholder("_replace_variables_by_constants")()
    converter_data = _FunctionConverterData(func=func, lower_control_flow=False, aggressive_inlining=True)
    frozen_graph_def, _ = _replace_variables_by_constants(converter_data=converter_data)
    return frozen_graph_def
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def from_function(func, input_names, output_names, large_model=False):
    """Freeze a ConcreteFunction into an optimized GraphDef.

    When large_model is True, returns the raw frozen GraphDef from the
    internal large-model path (no grappler optimization is run on it).
    """
    if large_model:
        return convert_variables_to_constants_large_model(func)

    # aggressive_inlining only exists from TF 2.2 onward
    if get_tf_version() < LooseVersion("2.2"):
        frozen_func = convert_variables_to_constants_v2(func, lower_control_flow=False)
    else:
        frozen_func = convert_variables_to_constants_v2(func, lower_control_flow=False, aggressive_inlining=True)
    graph_def = frozen_func.graph.as_graph_def(add_shapes=True)
    # output_names = [i.name for i in frozen_func.outputs]
    # Re-import into a fresh graph so grappler sees a clean, frozen graph.
    with tf.Graph().as_default() as tf_graph:
        with tf_session(graph=tf_graph) as sess:
            tf.import_graph_def(graph_def, name='')
            input_names = inputs_without_resource(sess, input_names)
            graph_def = tf_optimize(input_names, output_names, graph_def)
    return graph_def
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def freeze_session(sess, input_names=None, output_names=None):
    """Freezes the state of a session into a pruned computation graph.

    Converts all variables reachable from output_names into constants and
    returns the resulting GraphDef. input_names/output_names are tensor
    names ("name:0"); node names are derived by stripping the port suffix.

    Fix: the original crashed with TypeError when either argument was left
    at its documented default of None; empty lists are now used instead.
    """
    output_node_names = [i.split(':')[:-1][0] for i in (output_names or [])]
    keep_var_names = [i.split(':')[:-1][0] for i in (input_names or [])]
    with sess.graph.as_default():
        # keep all global variables and the requested inputs alive so
        # extract/convert below doesn't prune them away
        output_node_names += [v.op.name for v in tf_global_variables()]
        output_node_names += keep_var_names
        graph_def = sess.graph.as_graph_def(add_shapes=True)
        for node in graph_def.node:
            # strip device placement so the frozen graph is portable
            node.device = ""
        graph_def = convert_variables_to_constants(sess, graph_def, output_node_names)
    return graph_def
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def remove_redundant_inputs(frozen_graph, input_names):
    """Remove inputs that do not correspond to any node in the frozen graph.

    Returns the filtered list of input names and logs a warning listing any
    dropped names. (Removed a dead `frozen_inputs = []` initialization that
    was immediately overwritten.)
    """
    # node names actually present in the frozen graph
    node_names = set(n.name for n in frozen_graph.node)
    frozen_inputs = [inp for inp in input_names if utils.node_name(inp) in node_names]
    deleted_inputs = list(set(input_names) - set(frozen_inputs))
    if deleted_inputs:
        logger.warning("inputs [%s] is not in frozen graph, delete them", ",".join(deleted_inputs))
    return frozen_inputs
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def from_graphdef(model_path, input_names, output_names):
    """Load tensorflow graph from graphdef.

    Reads a serialized GraphDef from model_path, freezes it against the
    given inputs/outputs, drops inputs missing from the frozen graph, and
    runs graph optimization. Returns (frozen_graph, input_names, output_names).
    Raises OSError when the file can't be read, RuntimeError when it can't
    be parsed as a GraphDef.
    """
    # make sure we start with clean default graph
    tf_reset_default_graph()
    with tf_session() as sess:
        graph_def = tf_graphdef()
        with tf_gfile.GFile(model_path, 'rb') as f:
            try:
                content = f.read()
            except Exception as e:
                raise OSError(
                    "Unable to load file '{}'.".format(model_path)) from e
            try:
                graph_def.ParseFromString(content)
            except Exception as e:
                raise RuntimeError(
                    "Unable to parse file '{}'.".format(model_path)) from e
        tf.import_graph_def(graph_def, name='')
        input_names = inputs_without_resource(sess, input_names)
        frozen_graph = freeze_session(sess, input_names=input_names, output_names=output_names)
        input_names = remove_redundant_inputs(frozen_graph, input_names)

    # optimize in a second, clean session so grappler works on the frozen graph
    tf_reset_default_graph()
    with tf_session() as sess:
        input_names = inputs_without_resource(sess, input_names)
        frozen_graph = tf_optimize(input_names, output_names, frozen_graph)
    tf_reset_default_graph()
    return frozen_graph, input_names, output_names
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
def from_checkpoint(model_path, input_names, output_names):
    """Load tensorflow graph from checkpoint.

    model_path points at the ".meta" file of a TF1 checkpoint. Restores the
    variables, freezes the graph, and optimizes it.
    Returns (frozen_graph, input_names, output_names).
    """
    # make sure we start with clean default graph
    tf_reset_default_graph()
    # model_path = checkpoint/checkpoint.meta
    # pin to CPU so restoring doesn't require the original devices
    with tf.device("/cpu:0"):
        with tf_session() as sess:
            saver = tf_import_meta_graph(model_path, clear_devices=True)
            # restore from model_path minus the ".meta"
            saver.restore(sess, model_path[:-5])
            input_names = inputs_without_resource(sess, input_names)
            frozen_graph = freeze_session(sess, input_names=input_names, output_names=output_names)
            input_names = remove_redundant_inputs(frozen_graph, input_names)

    # optimize in a second, clean session
    tf_reset_default_graph()
    with tf_session() as sess:
        frozen_graph = tf_optimize(input_names, output_names, frozen_graph)
    tf_reset_default_graph()
    return frozen_graph, input_names, output_names
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
def _from_saved_model_v1(sess, model_path, input_names, output_names, tag, signatures):
    """Load tensorflow graph from saved_model.

    TF1 path: loads the SavedModel into `sess`, collects public signature
    names into the caller-provided `signatures` list (mutated in place), and
    derives input/output tensor names from the signatures when not given.
    Returns (frozen_graph, input_names, output_names, tensors_to_rename).
    """

    wrn_no_tag = "'--tag' not specified for saved_model. Using --tag serve"
    wrn_empty_tag = "'--tag' value is empty string. Using tag =[[]]"

    if tag is None:
        tag = [tf.saved_model.tag_constants.SERVING]
        logger.warning(wrn_no_tag)

    if tag == '':
        tag = [[]]
        logger.warning(wrn_empty_tag)

    if not isinstance(tag, list):
        tag = [tag]

    imported = tf.saved_model.loader.load(sess, tag, model_path)
    for k in imported.signature_def.keys():
        if k.startswith("_"):
            # consider signatures starting with '_' private
            continue
        signatures.append(k)
    try:
        from tensorflow.contrib.saved_model.python.saved_model import signature_def_utils
        # pylint: disable=unnecessary-lambda
        get_signature_def = lambda meta_graph_def, k: \
            signature_def_utils.get_signature_def_by_key(meta_graph_def, k)
    except ImportError:
        # TF1.12 changed the api
        get_signature_def = lambda meta_graph_def, k: meta_graph_def.signature_def[k]

    if input_names is None:
        # collect unique input tensor names across all public signatures
        input_names = []
        for k in signatures:
            inputs_tensor_info = get_signature_def(imported, k).inputs
            for _, input_tensor in inputs_tensor_info.items():
                if input_tensor.name not in input_names:
                    input_names.append(input_tensor.name)
    tensors_to_rename = {}
    if output_names is None:
        # collect outputs and remember the signature's structured name for each
        output_names = []
        for k in signatures:
            outputs_tensor_info = get_signature_def(imported, k).outputs
            for structured_name, output_tensor in outputs_tensor_info.items():
                if output_tensor.name not in output_names:
                    output_names.append(output_tensor.name)
                    tensors_to_rename[output_tensor.name] = structured_name
    frozen_graph = freeze_session(sess, input_names=input_names, output_names=output_names)
    return frozen_graph, input_names, output_names, tensors_to_rename
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
def _get_hash_table_info_from_trackable(trackable, table_names, key_dtypes, value_dtypes,
                                        removed_resource_to_placeholder, placeholder_to_table_info):
    """Recursively collect hash-table metadata from a loaded SavedModel object.

    Appends discovered table names/dtypes to the three list arguments
    (mutated in place) and, for tables whose resource was previously replaced
    by a placeholder, records the mapping in placeholder_to_table_info.
    """
    # pylint: disable=protected-access
    for r in trackable.__dict__.values():
        if isinstance(r, TfRestoredResourceType) and hasattr(r, '_create_resource'):
            try:
                table_handle = id(r.resource_handle)
            except Exception:  # pylint: disable=broad-except
                # resource_handle may fail to materialize; skip this resource
                continue
            initializer = r._create_resource.concrete_functions[0].function_def
            new_names, new_k_dtypes, new_v_dtypes = get_hash_table_info(initializer.node_def)
            table_names.extend(new_names)
            key_dtypes.extend(new_k_dtypes)
            value_dtypes.extend(new_v_dtypes)
            # only map a placeholder when the initializer holds exactly one table
            if table_handle in removed_resource_to_placeholder and len(new_names) == 1:
                table_info = (new_names[0], new_k_dtypes[0], new_v_dtypes[0])
                placeholder_to_table_info[removed_resource_to_placeholder[table_handle]] = table_info
        if isinstance(r, TfAutoTrackableType):
            # recurse into nested trackable objects
            _get_hash_table_info_from_trackable(r, table_names, key_dtypes, value_dtypes,
                                                removed_resource_to_placeholder, placeholder_to_table_info)
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
def _remove_non_variable_resources_from_captures(concrete_func):
    """
    Removes all non-variable resources (such as tables) from a function's captured inputs to prevent tf from
    raising a 'cannot convert dtype resource to numpy' error while freezing the graph.

    Returns (resource_id_to_placeholder, graph_captures_copy, func_captures_copy);
    the two copies allow _restore_captured_resources to undo the mutation.
    """
    # pylint: disable=protected-access
    resource_id_to_placeholder = {}
    graph_captures_copy = None
    func_captures_copy = None
    if hasattr(concrete_func.graph, '_captures') and hasattr(concrete_func, '_captured_inputs'):
        # snapshot the internals before mutating them
        graph_captures_copy = concrete_func.graph._captures.copy()
        func_captures_copy = concrete_func._captured_inputs.copy()
        variable_handles = {id(v.handle) for v in concrete_func.graph.variables}
        for k, v in list(concrete_func.graph._captures.items()):
            val_tensor, name_tensor = v
            if val_tensor.dtype == tf.resource and id(val_tensor) not in variable_handles:
                # non-variable resource (e.g. a table): drop the capture and
                # remember which placeholder node stood in for it
                resource_id_to_placeholder[id(val_tensor)] = name_tensor.name.split(':')[0]
                del concrete_func.graph._captures[k]
                for i in reversed(range(len(concrete_func._captured_inputs))):
                    if concrete_func._captured_inputs[i] is val_tensor:
                        concrete_func._captured_inputs.pop(i)
            elif val_tensor.dtype != tf.resource:
                npval = val_tensor.numpy()
                if not hasattr(npval, 'dtype'):
                    # Hack around a TF bug until PR is merged: https://github.com/tensorflow/tensorflow/pull/45610
                    arr = np.array(npval)
                    val_tensor.numpy = lambda arr=arr: arr
    else:
        logger.warning(
            "Could not search for non-variable resources. Concrete function internal representation may have changed.")
    return resource_id_to_placeholder, graph_captures_copy, func_captures_copy
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
def _restore_captured_resources(concrete_func, graph_captures_copy, func_captures_copy):
|
| 342 |
+
"""Undoes effect of _remove_non_variable_resources_from_captures on concrete_func"""
|
| 343 |
+
# pylint: disable=protected-access
|
| 344 |
+
if hasattr(concrete_func.graph, '_captures') and hasattr(concrete_func, '_captured_inputs'):
|
| 345 |
+
concrete_func.graph._captures = graph_captures_copy
|
| 346 |
+
concrete_func._captured_inputs = func_captures_copy
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
def _from_saved_model_v2(model_path, input_names, output_names, tag, signature_def,
                         concrete_function_index, large_model):
    """Load tensorflow graph from saved_model.

    TF2 path: picks a concrete function (by index, by signature name, or the
    first public signature), derives inputs/outputs when not given, freezes
    it, and recovers hash-table contents so lookups survive freezing.
    Returns (frozen_graph, inputs, outputs, concrete_func, imported,
    initialized_tables, tensors_to_rename).
    """

    wrn_no_tag = "'--tag' not specified for saved_model. Using --tag serve"
    wrn_empty_tag = "'--tag' value is empty string. Using tag =[[]]"
    wrn_sig_1 = "'--signature_def' not specified, using first signature: %s"
    err_many_sig = "Cannot load multiple signature defs in TF2.x: %s"
    err_no_call = "Model doesn't contain usable concrete functions under __call__. Try --signature-def instead."
    err_index = "Invalid concrete_function value: %i. Valid values are [0 to %i]"
    err_no_sig = "No signatures found in model. Try --concrete_function instead."
    err_sig_nomatch = "Specified signature not in model %s"
    err_large_model = "model exceeds maximum protobuf size of 2GB. Try running with --large_model flag."

    if tag is None:
        tag = ['serve']
        logger.warning(wrn_no_tag)

    if tag == '':
        tag = [[]]
        logger.warning(wrn_empty_tag)

    utils.make_sure(len(signature_def) < 2, err_many_sig, str(signature_def))
    imported = tf.saved_model.load(model_path, tags=tag)  # pylint: disable=no-value-for-parameter

    # signatures starting with '_' are considered private
    all_sigs = imported.signatures.keys()
    valid_sigs = [s for s in all_sigs if not s.startswith("_")]
    logger.info("Signatures found in model: %s", "[" + ",".join(valid_sigs) + "].")

    # selection priority: explicit concrete-function index > named signature > first public signature
    concrete_func = None
    if concrete_function_index is not None:
        utils.make_sure(hasattr(imported, "__call__"), err_no_call)
        utils.make_sure(concrete_function_index < len(imported.__call__.concrete_functions),
                        err_index, concrete_function_index, len(imported.__call__.concrete_functions) - 1)
        args, kwargs = imported.__call__.concrete_functions[concrete_function_index].structured_input_signature
        concrete_func = imported.__call__.get_concrete_function(*args, **kwargs)
    elif signature_def:
        utils.make_sure(signature_def[0] in valid_sigs, err_sig_nomatch, signature_def[0])
        concrete_func = imported.signatures[signature_def[0]]
    else:
        utils.make_sure(len(valid_sigs) > 0, err_no_sig)
        logger.warning(wrn_sig_1, valid_sigs[0])
        concrete_func = imported.signatures[valid_sigs[0]]

    tensors_to_rename = {}
    if input_names is None:
        inputs = [tensor.name for tensor in concrete_func.inputs if tensor.dtype != tf.dtypes.resource]
        if concrete_func.structured_input_signature is not None:
            # prefer the structured (user-visible) inputs when they match actual graph inputs
            args, kwargs = concrete_func.structured_input_signature
            structured_inputs = [t.name for t in args if isinstance(t, tf.TensorSpec)] + sorted(kwargs.keys())
            structured_inputs = set(inp + ":0" for inp in structured_inputs)
            if any(inp in structured_inputs for inp in inputs):
                inputs = [inp for inp in inputs if inp in structured_inputs]
    else:
        inputs = input_names

    if output_names is None:
        outputs = [tensor.name for tensor in concrete_func.outputs if tensor.dtype != tf.dtypes.resource]
        if isinstance(concrete_func.structured_outputs, dict):
            # outputs are sorted, sort structured_outputs the same way
            structured_outputs = sorted(concrete_func.structured_outputs.keys())
            tensors_to_rename.update(zip(outputs, structured_outputs))
            logger.info("Output names: %r", structured_outputs)
        else:
            logger.info("Output names: %r", outputs)
    else:
        outputs = output_names
        logger.info("Outputs not left as None; will use provided names not structured output names.")

    # Avoid errors due to bug in TF freezing
    removed_resource_to_placeholder, graph_captures_copy, func_captures_copy = \
        _remove_non_variable_resources_from_captures(concrete_func)

    try:
        frozen_graph = from_function(concrete_func, inputs, outputs, large_model)
    except ValueError as e:
        # translate TF's size-limit errors into a hint about --large_model
        if any(msg in str(e) for msg in ["exceeds maximum protobuf size of 2GB", "string too long"]):
            raise ValueError(err_large_model)
        raise e

    # We might be returning the concrete_func so let's put it back in working order
    _restore_captured_resources(concrete_func, graph_captures_copy, func_captures_copy)

    # gather table metadata from both the frozen graph and the trackable object tree
    table_names, key_dtypes, value_dtypes = get_hash_table_info(frozen_graph)
    placeholder_to_table_info = {}
    _get_hash_table_info_from_trackable(imported, table_names, key_dtypes, value_dtypes,
                                        removed_resource_to_placeholder, placeholder_to_table_info)

    # export each table's current contents so the converter can embed them
    initialized_tables = {}
    for n, k_dtype, val_dtype in zip(table_names, key_dtypes, value_dtypes):
        h = lookup_ops.hash_table_v2(k_dtype, val_dtype, shared_name=n)
        try:
            k, v = lookup_ops.lookup_table_export_v2(h, k_dtype, val_dtype)
            initialized_tables[n] = (k.numpy(), v.numpy())
        except Exception:  # pylint: disable=broad-except
            logger.warning("Could not initialize table with shared_name = %r", n)

    for placeholder in removed_resource_to_placeholder.values():
        if placeholder not in placeholder_to_table_info:
            logger.error("Could not find table resource to replace placeholder %s", placeholder)

    replace_placeholders_with_tables(frozen_graph, placeholder_to_table_info)

    return frozen_graph, inputs, outputs, concrete_func, imported, initialized_tables, tensors_to_rename
|
| 453 |
+
|
| 454 |
+
|
| 455 |
+
def from_saved_model(model_path, input_names, output_names, tag=None,
                     signatures=None, concrete_function=None, large_model=False,
                     return_concrete_func=False, return_initialized_tables=False, return_tensors_to_rename=False):
    """Load tensorflow graph from saved_model.

    Dispatches to the TF1 or TF2 loader. Always returns
    [frozen_graph, input_names, output_names]; the return_* flags append
    further elements in the order: concrete_func+imported (TF2 only),
    initialized_tables, tensors_to_rename.
    """
    if signatures is None:
        signatures = []
    tf_reset_default_graph()
    # pin to CPU so loading doesn't require the original devices
    with tf.device("/cpu:0"):
        if is_tf2():
            frozen_graph, input_names, output_names, concrete_func, imported, initialized_tables, tensors_to_rename = \
                _from_saved_model_v2(model_path, input_names, output_names,
                                     tag, signatures, concrete_function, large_model)
            result = [frozen_graph, input_names, output_names]
            if return_concrete_func:
                result += [concrete_func, imported]
            if return_initialized_tables:
                result += [initialized_tables]
            if return_tensors_to_rename:
                result += [tensors_to_rename]
        else:
            with tf_session() as sess:
                frozen_graph, input_names, output_names, tensors_to_rename = \
                    _from_saved_model_v1(sess, model_path, input_names, output_names, tag, signatures)
                result = [frozen_graph, input_names, output_names]
                if return_initialized_tables:
                    # TF1 path has no table recovery; keep the result shape consistent
                    result += [{}]
                if return_tensors_to_rename:
                    result += [tensors_to_rename]
    tf_reset_default_graph()
    return result
|
| 485 |
+
|
| 486 |
+
|
| 487 |
+
def from_keras(model_path, input_names, output_names):
    """Load keras model - experimental for now.

    Eager mode: traces the model into a ConcreteFunction and freezes it.
    Graph mode: freezes the backing session. In both paths the passed
    input_names/output_names are overwritten with the model's own tensors.
    Returns (frozen_graph, input_names, output_names).
    """
    from tensorflow.python import keras as _keras
    from tensorflow.python.eager import context
    from tensorflow.python.keras.saving import saving_utils as _saving_utils

    # Handles Keras when Eager mode is enabled.
    custom_objects = None
    with tf.device("/cpu:0"):
        if context.executing_eagerly():
            _keras.backend.clear_session()
            _keras.backend.set_learning_phase(False)
            keras_model = _keras.models.load_model(model_path, custom_objects)

            function = _saving_utils.trace_model_call(keras_model)
            concrete_func = function.get_concrete_function()
            # allow to pass inputs and outputs from caller if we don't want all of them
            input_names = [input_tensor.name for input_tensor in concrete_func.inputs
                           if input_tensor.dtype != tf.dtypes.resource]
            output_names = [output_tensor.name for output_tensor in concrete_func.outputs
                            if output_tensor.dtype != tf.dtypes.resource]
            frozen_graph = from_function(concrete_func, input_names, output_names)
        else:
            # Handles Keras when Eager mode is disabled.
            _keras.backend.clear_session()
            _keras.backend.set_learning_phase(False)
            keras_model = _keras.models.load_model(model_path, custom_objects)
            # allow to pass inputs and outputs from caller if we don't want all of them
            input_names = keras_model.inputs
            output_names = keras_model.outputs
            sess = _keras.backend.get_session()
            input_names = inputs_without_resource(sess, input_names)
            frozen_graph = freeze_session(sess, input_names=input_names, output_names=output_names)
            # optimize in a fresh session on the frozen graph
            tf_reset_default_graph()
            with tf_session() as sess:
                frozen_graph = tf_optimize(input_names, output_names, frozen_graph)
    tf_reset_default_graph()
    return frozen_graph, input_names, output_names
|
| 525 |
+
|
| 526 |
+
|
| 527 |
+
def tf_optimize_grappler(input_names, output_names, graph_def, fold_constant=None):
    """Optimize graph_def with TF's grappler (constfold + function inlining).

    fold_constant is accepted for interface compatibility but unused here.
    Returns the optimized GraphDef.
    """
    from tensorflow.core.protobuf import meta_graph_pb2 as meta_graph_pb2, config_pb2, rewriter_config_pb2
    from tensorflow.python.grappler import tf_optimizer as tf_opt

    config = config_pb2.ConfigProto()
    rewrite_options = config.graph_options.rewrite_options
    config.graph_options.infer_shapes = True
    # TODO: if we turn on pruning, grappler removes some identities that the tf-1.x lstm rewriter
    # depends on so for now don't turn this on.
    rewrite_options.optimizers[:] = [
        # 'pruning', 'constfold', 'arithmetic', 'dependency', 'function',
        'constfold', 'function'
    ]
    meta_graph = tf.compat.v1.train.export_meta_graph(graph_def=graph_def)
    # register inputs/outputs as fetches so grappler keeps them alive
    fetch_collection = meta_graph_pb2.CollectionDef()
    for t in input_names + output_names:
        fetch_collection.node_list.value.append(t)
    meta_graph.collection_def["train_op"].CopyFrom(fetch_collection)
    graph_def = tf_opt.OptimizeGraph(config, meta_graph)
    return graph_def
|
| 547 |
+
|
| 548 |
+
|
| 549 |
+
def tf_optimize(input_names, output_names, graph_def, fold_constant=True):
    """Extract inference subgraph and optimize graph.

    input_names/output_names: lists of tensor names bounding the subgraph.
    graph_def: the GraphDef to trim and optimize.
    fold_constant: forwarded to the grappler path; the legacy TransformGraph
        path always folds constants.
    Returns the optimized GraphDef.
    """
    assert isinstance(input_names, list)
    assert isinstance(output_names, list)

    # TODO: is this needed ?
    # Trim the graph to only the nodes reachable from inputs/outputs.
    needed_names = [utils.node_name(i) for i in input_names] + \
                   [utils.node_name(i) for i in output_names]
    graph_def = extract_sub_graph(graph_def, needed_names)

    # grappler is used on tf2 and on tf >= 1.15; older versions fall back to
    # the deprecated graph_transforms tool.
    want_grappler = is_tf2() or LooseVersion(tf.__version__) >= "1.15"
    if want_grappler:
        graph_def = tf_optimize_grappler(input_names, output_names, graph_def, fold_constant)
    else:
        # the older transform path
        from tensorflow.tools.graph_transforms import TransformGraph  # pylint: disable=redefined-outer-name
        transforms = [
            "fold_constants(ignore_errors=true)",
            "remove_attribute(attribute_name=_class)",  # remove node colocation attributes
            "fold_batch_norms",
            "fold_old_batch_norms",
        ]
        graph_def = TransformGraph(graph_def, input_names, output_names, transforms)

    return graph_def
|
| 574 |
+
|
| 575 |
+
|
| 576 |
+
def tf_reload_graph(tf_graph):
    """Invoke tensorflow cpp shape inference by round-tripping the graph through a GraphDef."""
    # Shape inference only happens on import when the graph is built through
    # the C API, which is not the default before TF 1.8.
    if get_tf_version() < LooseVersion("1.8"):
        logger.debug(
            "On TF < 1.8, graph is constructed by python API, "
            "which doesn't invoke shape inference, please set "
            "TF_C_API_GRAPH_CONSTRUCTION=1 to enable it"
        )

    serialized = tf_graph.as_graph_def(add_shapes=True)
    with tf.Graph().as_default() as reloaded_graph:
        tf.import_graph_def(serialized, name="")
    return reloaded_graph
|
| 590 |
+
|
| 591 |
+
|
| 592 |
+
def is_function(g):
    """Return True when *g* is a tf2 FuncGraph; always False under tf1."""
    if not is_tf2():
        return False
    return 'tensorflow.python.framework.func_graph.FuncGraph' in str(type(g))
|
| 596 |
+
|
| 597 |
+
|
| 598 |
+
_FUNCTIONS = {}
|
| 599 |
+
|
| 600 |
+
|
| 601 |
+
def resolve_functions(tf_graph):
    """Collect all tf functions reachable from tf_graph, registering them in
    _FUNCTIONS, and return their FuncGraphs in dependency order (callees
    before callers)."""
    def toposort(data):
        # Kahn-style layered topological sort over {name: set(dependency names)};
        # yields one set of dependency-free items per round.
        while True:
            ordered = set(item for item, dep in data.items() if not dep)
            if not ordered:
                break
            yield ordered
            data = {item: (dep - ordered) for item, dep in data.items() if item not in ordered}

    # `functions` maps function name -> input shapes observed at call sites.
    _, _, _, _, _, functions = tflist_to_onnx(tf_graph, {})
    data = {}
    for k, fdef in tf_graph._functions.items():  # pylint: disable=protected-access
        input_shapes = functions.get(k)
        fdef = fdef.definition
        # Caller may pass more inputs than the function signature declares
        # (e.g. captured inputs); truncate to the declared arg count.
        if input_shapes and len(fdef.signature.input_arg) < len(input_shapes):
            input_shapes = input_shapes[:len(fdef.signature.input_arg)]
        try:
            func = function_def_to_graph(fdef, input_shapes=input_shapes)
        except:  # pylint: disable=bare-except
            # if there is a mismatch between caller and function use the function's own shapes
            logger.warning("shape missmatch between caller and function: %s", k)
            func = function_def_to_graph(fdef)
        _FUNCTIONS[k] = func
        # Recurse one level: record which functions this function itself calls.
        _, _, _, _, _, tfunctions = tflist_to_onnx(func, {})
        functions.update(tfunctions)
        data[k] = set(tfunctions.keys())

    result = []
    for d in toposort(data):
        result.extend(list(d))
    return [_FUNCTIONS[k] for k in result]
|
| 632 |
+
|
| 633 |
+
|
| 634 |
+
def set_function(name, func):
    """Register *func* under *name* in the module-level function table."""
    _FUNCTIONS[name] = func
|
| 636 |
+
|
| 637 |
+
|
| 638 |
+
def find_function(name):
    """Look up a previously registered function graph; None when absent."""
    return _FUNCTIONS.get(name)
|
lib/python3.10/site-packages/tf2onnx/tf_utils.py
ADDED
|
@@ -0,0 +1,460 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
tf2onnx.tf_utils - misc utilities for tf2onnx that interface with tensorflow
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import division
|
| 9 |
+
from __future__ import print_function
|
| 10 |
+
from __future__ import unicode_literals
|
| 11 |
+
|
| 12 |
+
import collections
|
| 13 |
+
from distutils.version import LooseVersion
|
| 14 |
+
|
| 15 |
+
import numpy as np
|
| 16 |
+
import tensorflow as tf
|
| 17 |
+
|
| 18 |
+
from tensorflow.core.framework import types_pb2, tensor_pb2, graph_pb2
|
| 19 |
+
from tensorflow.python.framework import tensor_util
|
| 20 |
+
|
| 21 |
+
from onnx import helper, onnx_pb, numpy_helper
|
| 22 |
+
|
| 23 |
+
from tf2onnx.utils import make_sure, is_tf_const_op, port_name, map_onnx_to_numpy_type
|
| 24 |
+
from . import logging
|
| 25 |
+
|
| 26 |
+
logger = logging.getLogger(__name__)
|
| 27 |
+
|
| 28 |
+
#
|
| 29 |
+
# mapping dtypes from tensorflow to onnx
|
| 30 |
+
#
|
| 31 |
+
TF_TO_ONNX_DTYPE = {
|
| 32 |
+
types_pb2.DT_FLOAT: onnx_pb.TensorProto.FLOAT,
|
| 33 |
+
types_pb2.DT_HALF: onnx_pb.TensorProto.FLOAT16,
|
| 34 |
+
types_pb2.DT_BFLOAT16: onnx_pb.TensorProto.FLOAT16,
|
| 35 |
+
types_pb2.DT_DOUBLE: onnx_pb.TensorProto.DOUBLE,
|
| 36 |
+
types_pb2.DT_INT32: onnx_pb.TensorProto.INT32,
|
| 37 |
+
types_pb2.DT_INT16: onnx_pb.TensorProto.INT16,
|
| 38 |
+
types_pb2.DT_INT8: onnx_pb.TensorProto.INT8,
|
| 39 |
+
types_pb2.DT_UINT8: onnx_pb.TensorProto.UINT8,
|
| 40 |
+
types_pb2.DT_UINT16: onnx_pb.TensorProto.UINT16,
|
| 41 |
+
types_pb2.DT_INT64: onnx_pb.TensorProto.INT64,
|
| 42 |
+
types_pb2.DT_STRING: onnx_pb.TensorProto.STRING,
|
| 43 |
+
types_pb2.DT_COMPLEX64: onnx_pb.TensorProto.COMPLEX64,
|
| 44 |
+
types_pb2.DT_COMPLEX128: onnx_pb.TensorProto.COMPLEX128,
|
| 45 |
+
types_pb2.DT_BOOL: onnx_pb.TensorProto.BOOL,
|
| 46 |
+
types_pb2.DT_RESOURCE: onnx_pb.TensorProto.INT64, # TODO: hack to allow processing on control flow
|
| 47 |
+
types_pb2.DT_VARIANT: onnx_pb.TensorProto.UNDEFINED,
|
| 48 |
+
types_pb2.DT_QUINT8: onnx_pb.TensorProto.UINT8,
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def tf_to_onnx_tensor(tensor, name=""):
    """Convert tensorflow tensor to onnx tensor.

    tensor: a tensorflow tensor_pb2.TensorProto.
    name: name assigned to the resulting onnx tensor.
    Raises RuntimeError when a string tensor holds bytes that cannot be decoded.
    """
    np_data = get_tf_tensor_data(tensor)
    # NOTE: `np.object`/`np.str` were deprecated aliases of the builtins and
    # were removed in numpy 1.20/1.24 — use the builtins directly.
    if np_data.dtype == object:
        # assume np_data is string, numpy_helper.from_array accepts ndarray,
        # in which each item is of str while the whole dtype is of object.
        try:
            # Faster but fails on Unicode
            np_data = np_data.astype(str).astype(object)
        except UnicodeDecodeError:
            decode = np.vectorize(lambda x: x.decode('UTF-8'))
            np_data = decode(np_data).astype(object)
        except Exception:
            # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate
            raise RuntimeError("Not support type: {}".format(type(np_data.flat[0])))
    return numpy_helper.from_array(np_data, name=name)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def get_tf_tensor_data(tensor):
    """Convert a tensorflow tensor_pb2.TensorProto to a numpy ndarray."""
    make_sure(isinstance(tensor, tensor_pb2.TensorProto), "Require TensorProto")
    np_data = tensor_util.MakeNdarray(tensor)
    # MakeNdarray should always give an ndarray; guard against TF surprises.
    make_sure(isinstance(np_data, np.ndarray), "%r isn't ndarray", np_data)
    return np_data
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def get_tf_const_value(op, as_list=True):
    """Return the value held by a tf Const op.

    With as_list=True the value is converted via ndarray.tolist(), so a scalar
    tensor of value 1 comes back as the python int 1; with as_list=False the
    raw np.ndarray (e.g. np.array(1)) is returned.
    """
    make_sure(is_tf_const_op(op), "%r isn't a const op", op.name)
    raw = get_tf_tensor_data(op.get_attr("value"))
    return raw.tolist() if as_list else raw
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def get_tf_shape_attr(node):
    """Get shape from tensorflow attr "shape".

    Returns a list of ints (dim sizes), or None when the attr is missing
    or the rank is unknown.
    """
    dims = None
    try:
        shape = get_tf_node_attr(node, "shape")
        if not shape.unknown_rank:
            dims = [int(d.size) for d in shape.dim]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # not swallowed; any attr-lookup failure simply means "no shape".
        pass
    return dims
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def get_tf_tensor_shape(tensor):
    """Return the tensor's shape as a list (None entries for unknown dims),
    or None when the shape cannot be determined at all."""
    try:
        return tensor.get_shape().as_list()
    except Exception:  # pylint: disable=broad-except
        return None
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def map_tf_dtype(dtype):
    """Translate a tensorflow dtype enum to its onnx dtype; falsy input is
    passed through unchanged."""
    if not dtype:
        return dtype
    return TF_TO_ONNX_DTYPE[dtype]
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def get_tf_node_attr(node, name):
    """Return the value of attribute *name* on a tensorflow node."""
    return node.get_attr(name)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def get_tf_version():
    """Installed tensorflow version wrapped in a comparable LooseVersion."""
    return LooseVersion(tf.__version__)
|
| 127 |
+
|
| 128 |
+
def compress_graph_def(graph_def):
    """
    Strip large Const tensor contents out of graph_def, returning them in a
    dict keyed by node name. This lets us import the graph and run shape
    inference without TF crashing; small constants (often shape information)
    are left in place.
    """
    extracted = {}
    for node_def in list(graph_def.node):
        if node_def.op != 'Const':
            continue
        tensor = node_def.attr["value"].tensor
        # Small constants are sometimes used to store shape information and must be maintained
        if len(tensor.tensor_content) > 1000:
            make_sure(node_def.name not in extracted, "Two nodes in graph have same name %s", node_def.name)
            extracted[node_def.name] = tensor.tensor_content
            tensor.tensor_content = b''
    return extracted
|
| 143 |
+
|
| 144 |
+
def get_index_from_strided_slice_of_shape(node, outputs_to_values):
    """Returns the index of the dimension that the strided slice is reading from the shape node or None"""
    # Only the plain form shape[i] (slice [i:i+1:1] with shrink_axis) is
    # recognized; any other mask configuration bails out.
    attr_vals = {
        'shrink_axis_mask': 1,
        'ellipsis_mask': 0,
        'begin_mask': 0,
        'new_axis_mask': 0,
        'end_mask': 0
    }
    for a in node.node_def.attr:
        if a in attr_vals:
            i = get_tf_node_attr(node, a)
            if i != attr_vals[a]:
                return None
    # inputs 1..3 are begin, end, strides; they must all be known constants.
    i1 = outputs_to_values.get(node.inputs[1].name)
    i2 = outputs_to_values.get(node.inputs[2].name)
    i3 = outputs_to_values.get(node.inputs[3].name)
    if i1 is None or i2 is None or i3 is None:
        return None
    # each must be a 1-element vector (slicing a rank-1 shape tensor)
    if i1.shape != (1,) or i2.shape != (1,) or i3.shape != (1,):
        return None
    i1, i2, i3 = i1[0], i2[0], i3[0]
    # require begin+1 == end and stride == 1, i.e. exactly shape[i1]
    if i1 + 1 != i2 or i3 != 1:
        return None
    return i1
|
| 169 |
+
|
| 170 |
+
def compute_const_folding_using_tf(g, const_node_values, graph_outputs):
    """Find nodes with constant inputs and compute their values using TF.

    g: tf graph to scan.
    const_node_values: optional dict of node name -> raw tensor_content, as
        produced by compress_graph_def (used to restore stripped constants).
    graph_outputs: names of graph outputs; these are never folded away.
    Returns (outputs_to_values, outputs_to_dtypes) for all folded outputs.
    """
    if const_node_values is None:
        const_node_values = {}
    graph_outputs = set(graph_outputs)
    from tf2onnx.tf_loader import tf_session, tf_placeholder  # pylint: disable=import-outside-toplevel

    ops = g.get_operations()
    outputs_to_values = {}
    outputs_to_dtypes = {}
    outputs_to_shapes = {}
    shape_node_outputs = {}

    # np.product was removed in numpy 2.0; np.prod is the supported spelling.
    def is_small_shape(x):
        return np.prod(x) <= 1000

    def is_huge_shape(x):
        return np.prod(x) >= 1000000

    for node in ops:
        # Load values of constants. Use const_node_values if possible
        if node.type in ["Const", "ConstV2"]:
            tensor = node.node_def.attr["value"].tensor
            if node.name in const_node_values:
                tensor.tensor_content = const_node_values[node.name]
            outputs_to_values[node.outputs[0].name] = get_tf_tensor_data(tensor)
            outputs_to_dtypes[node.outputs[0].name] = node.outputs[0].dtype
        for out in node.outputs:
            outputs_to_shapes[out.name] = get_tf_tensor_shape(out)

    # Shape ops whose input shape is statically known act as constants too.
    for node in ops:
        if node.type == "Shape":
            shape = outputs_to_shapes.get(node.inputs[0].name)
            if shape is not None:
                shape_node_outputs[node.outputs[0].name] = shape

    unneeded_outputs = set()
    progress = True
    while progress:
        progress = False
        for node in ops:
            # Find ops with constant inputs and compute their values
            input_names = [i.name for i in node.inputs]
            output_names = [i.name for i in node.outputs]
            # StridedSlice of a Shape reading a single known dim folds without
            # running TF at all.
            if node.type == 'StridedSlice' and input_names[0] in shape_node_outputs \
                    and output_names[0] not in outputs_to_values:
                shape = shape_node_outputs[input_names[0]]
                i = get_index_from_strided_slice_of_shape(node, outputs_to_values)
                if i is not None and 0 <= i < len(shape) and shape[i] is not None:
                    np_dtype = map_onnx_to_numpy_type(map_tf_dtype(node.outputs[0].dtype))
                    outputs_to_values[output_names[0]] = np.array(shape[i], dtype=np_dtype)
                    outputs_to_dtypes[node.outputs[0].name] = node.outputs[0].dtype
                    progress = True
            can_fold = node.type not in ['Enter', 'Placeholder', 'PlaceholderWithDefault']
            can_fold = can_fold and not node.type.startswith('Random')
            can_fold = can_fold and len(input_names) > 0 and all(inp in outputs_to_values for inp in input_names)
            # We can only fold nodes with a single output
            can_fold = can_fold and len(output_names) == 1 and output_names[0] not in outputs_to_values
            # Skip if value already computed, used, and discarded
            can_fold = can_fold and output_names[0] not in unneeded_outputs and output_names[0] not in graph_outputs
            if can_fold:
                # Make a mini graph containing just the node to fold
                g2 = tf.Graph()
                with g2.as_default():
                    for inp in input_names:
                        tf_placeholder(outputs_to_dtypes[inp], name=inp.split(':')[0])
                    mini_graph_def = g2.as_graph_def()
                    mini_graph_def.node.append(node.node_def)
                g3 = tf.Graph()
                with g3.as_default():
                    feed_dict = {}
                    inp_shapes = []
                    for inp in input_names:
                        inp_np = outputs_to_values[inp]
                        feed_dict[inp] = inp_np
                        inp_shapes.append(inp_np.shape)
                    try:
                        with tf_session() as sess:
                            tf.import_graph_def(mini_graph_def, name='')
                            results = sess.run(output_names, feed_dict=feed_dict)
                            if is_huge_shape(results[0].shape) and all(is_small_shape(inp) for inp in inp_shapes):
                                logger.debug("Skipping folding of node %s since result shape %s is much larger "
                                             "than input shapes %s", node.name, results[0].shape, inp_shapes)
                            else:
                                outputs_to_values[output_names[0]] = results[0]
                                outputs_to_dtypes[output_names[0]] = node.outputs[0].dtype
                                progress = True
                    except Exception:  # pylint: disable=broad-except
                        logger.debug("Could not fold node %s", node.name)
        # Garbage-collect values no unfolded node still reads, to keep memory bounded.
        unneeded_outputs.update(outputs_to_values.keys())
        for node in ops:
            # Mark values we need to keep
            input_names = [i.name for i in node.inputs]
            output_names = [i.name for i in node.outputs]
            if len(output_names) == 1 and output_names[0] in outputs_to_values:
                continue
            for i in input_names:
                if i in unneeded_outputs:
                    unneeded_outputs.remove(i)
        for node in unneeded_outputs:
            # Remove unneeded values to prevent memory usage explosion
            if node in outputs_to_values:
                del outputs_to_values[node]
                del outputs_to_dtypes[node]

    for node in ops:
        # We don't need the constants any more
        if node.type in ["Const", "ConstV2"] and node.outputs[0].name in outputs_to_values:
            del outputs_to_values[node.outputs[0].name]
            del outputs_to_dtypes[node.outputs[0].name]

    logger.info("Computed %d values for constant folding", len(outputs_to_values))
    return outputs_to_values, outputs_to_dtypes
|
| 283 |
+
|
| 284 |
+
def get_hash_table_info(nodes_or_graph_def):
    """
    Return lists of the shared_names, key_dtypes, and value_dtypes of all hash
    tables declared in the given graph_def or list of nodes. Tables with an
    empty shared_name are skipped.
    """
    if isinstance(nodes_or_graph_def, graph_pb2.GraphDef):
        nodes = nodes_or_graph_def.node
    else:
        nodes = nodes_or_graph_def
    names, key_dtypes, val_dtypes = [], [], []
    required = ('shared_name', 'key_dtype', 'value_dtype')
    for n in nodes:
        if n.op not in ("HashTableV2", "MutableHashTableV2"):
            continue
        if not all(k in n.attr for k in required):
            continue
        shared = n.attr['shared_name'].s
        if shared == b'':
            continue
        names.append(shared)
        key_dtypes.append(n.attr['key_dtype'].type)
        val_dtypes.append(n.attr['value_dtype'].type)
    return names, key_dtypes, val_dtypes
|
| 305 |
+
|
| 306 |
+
def replace_placeholders_with_tables(graph_def, placeholder_to_table_info):
    """
    Given a graph_def and a map from placeholder names to a tuple of
    (table shared_name, key dtype, value dtype), rewrite the matching
    Placeholder ops in graph_def into HashTableV2 ops, in place.
    """
    for n in graph_def.node:
        if n.op != "Placeholder" or n.name not in placeholder_to_table_info:
            continue
        shared_name, key_dtype, val_dtype = placeholder_to_table_info[n.name]
        # drop all existing placeholder attrs before re-typing the node
        for attr_name in list(n.attr):
            del n.attr[attr_name]
        n.op = "HashTableV2"
        n.attr['shared_name'].s = shared_name
        n.attr['key_dtype'].type = key_dtype
        n.attr['value_dtype'].type = val_dtype
|
| 321 |
+
def read_tf_node_def_attrs(node_def, input_dtypes, input_shapes):
    """Given a tf node def, returns a dict of attribute names to values.

    NOTE: mutates node_def in place — its inputs are cleared and rebound to
    synthetic placeholders and its name is rewritten to "node".
    """
    from tf2onnx.tf_loader import tf_session, tf_placeholder  # pylint: disable=import-outside-toplevel
    del node_def.input[:]
    node_def.name = "node"

    # read_tf_node_attrs uses some tf methods that require the node to be loaded into a valid TF graph
    g = tf.Graph()
    with g.as_default():
        # build placeholders matching the declared input dtypes/shapes
        for i, (dtype, shape) in enumerate(zip(input_dtypes, input_shapes)):
            inp = "input" + str(i)
            tf_placeholder(dtype, name=inp, shape=shape)
            node_def.input.append(inp)
        mini_graph_def = g.as_graph_def()
        mini_graph_def.node.append(node_def)
    g2 = tf.Graph()
    with g2.as_default():
        with tf_session() as sess:
            tf.import_graph_def(mini_graph_def, name='')
            node = sess.graph.get_operation_by_name("node")
            return read_tf_node_attrs(node)
|
| 342 |
+
|
| 343 |
+
|
| 344 |
+
def read_tf_node_attrs(node):
    """Given a tf Node, returns (attr, attr_cnt): a dict of attribute names to
    values plus a Counter of how often each attribute name was seen."""
    attr = {}
    attr_cnt = collections.Counter()

    # ignore the following attributes (tf type-parameter attrs and other
    # bookkeeping that has no onnx equivalent)
    ignored_attr = {"T", "unknown_rank", "_class", "Tshape", "use_cudnn_on_gpu", "Index", "Tpaddings",
                    "TI", "Tparams", "Tindices", "Tlen", "Tdim", "Tin", "dynamic_size", "Tmultiples",
                    "Tblock_shape", "Tcrops", "index_type", "Taxis", "U", "maxval",
                    "Tout", "Tlabels", "Tindex", "element_shape", "Targmax", "Tperm", "Tcond",
                    "T_threshold", "element_dtype", "shape_type", "_lower_using_switch_merge",
                    "parallel_iterations", "_num_original_outputs", "output_types", "output_shapes",
                    "key_dtype", "value_dtype", "Tin", "Tout", "capacity", "component_types", "shapes",
                    "Toutput_types", "dense_shapes", "Tdense", "Tsegmentids", "Tshift", "Tnumsegments", "SrcT",
                    "body", "cond", "then_branch", "else_branch", "f",
                    "Tcomplex", "Treal",  # For RFFT, Tcomplex is ignored because
                                          # onnx.helper.make_node fails,
                                          # TODO: it should be added back.
                    }

    for a in node.node_def.attr:
        attr_cnt[a] += 1
        value = get_tf_node_attr(node, a)
        if a in ignored_attr or isinstance(value, tensor_pb2.TensorProto):
            # tensor-valued attrs are handled separately by the caller
            pass
        elif a == "shape":
            shape = get_tf_shape_attr(node)
            if shape is not None:
                attr[a] = shape
        elif a == "DstT":
            # Cast's destination type maps to onnx Cast's "to" attribute
            attr["to"] = map_tf_dtype(value)
        elif isinstance(value, tf.DType):
            attr[a] = map_tf_dtype(value)
        elif isinstance(value, list) and len(value) > 0 and isinstance(value[0], tf.DType):
            attr[a] = [map_tf_dtype(v) for v in value]
        else:
            attr[a] = get_tf_node_attr(node, a)

    return attr, attr_cnt
|
| 383 |
+
|
| 384 |
+
def tflist_to_onnx(g, shape_override, const_node_values=None, ignore_default=None, use_default=None):
    """
    Convert the tf-node list into an onnx graph with minimal rewrites so
    we can use the onnx graph as intermediate graph.

    g: tf graph (or FuncGraph) to convert.
    shape_override: dict of output name -> shape, taking precedence over TF's shapes.
    const_node_values: optional dict of node name -> raw tensor_content to restore
        constants stripped by compress_graph_def.
    ignore_default/use_default: collections of PlaceholderWithDefault node names to
        rewrite as plain Placeholder or as Identity, respectively.
    Returns (onnx_nodes, op_cnt, attr_cnt, output_shapes, dtypes, functions).
    """

    node_list = g.get_operations()
    functions = {}

    # some stats
    op_cnt = collections.Counter()
    attr_cnt = collections.Counter()
    onnx_nodes = []
    output_shapes = {}
    dtypes = {}

    # find outputs
    ops = node_list

    # create dict with output to shape mappings
    for node in ops:
        for out in node.outputs:
            shape = shape_override.get(out.name)
            if shape is None:
                shape = get_tf_tensor_shape(out)
            dtypes[out.name] = map_tf_dtype(out.dtype)
            output_shapes[out.name] = shape

    for node in ops:
        attr, new_attr_cnt = read_tf_node_attrs(node)
        attr_cnt += new_attr_cnt
        takeit = True
        op_cnt[node.type] += 1
        # second pass over raw attrs for the cases read_tf_node_attrs skips:
        # dtype "T", function-valued attrs, and tensor-valued attrs
        for a in node.node_def.attr:
            attr_cnt[a] += 1
            value = get_tf_node_attr(node, a)
            if a == "T":
                if value and not isinstance(value, list):
                    dtypes[node.name] = map_tf_dtype(value)
            elif a in {"body", "cond", "then_branch", "else_branch", "f"}:
                # function attr: record the call-site input shapes for later
                # resolution (see resolve_functions)
                input_shapes = [inp.get_shape() for inp in node.inputs]
                nattr = get_tf_node_attr(node, a)
                attr[a] = nattr.name
                functions[nattr.name] = input_shapes
            elif isinstance(value, tensor_pb2.TensorProto):
                if const_node_values and node.name in const_node_values:
                    value.tensor_content = const_node_values[node.name]
                onnx_tensor = tf_to_onnx_tensor(value, name=port_name(node.name))
                attr[a] = onnx_tensor

        node_type = node.type
        input_names = [i.name for i in node.inputs]
        output_names = [i.name for i in node.outputs]

        if node_type == 'PlaceholderWithDefault':
            if ignore_default and node.name in ignore_default:
                node_type = 'Placeholder'
                input_names = []
            elif use_default and node.name in use_default:
                node_type = 'Identity'

        if takeit:
            try:
                onnx_node = helper.make_node(node_type, input_names, output_names, name=node.name, **attr)
                onnx_nodes.append(onnx_node)
            except Exception as ex:
                logger.error("pass1 convert failed for %s, ex=%s", node, ex)
                raise

    return onnx_nodes, op_cnt, attr_cnt, output_shapes, dtypes, functions
|
| 454 |
+
|
| 455 |
+
|
| 456 |
+
def tensorflow_to_onnx(graph, shape_override, const_node_values=None, ignore_default=None, use_default=None):
    """
    Load tensorflow graph and do a conversion; thin wrapper over tflist_to_onnx.
    """
    return tflist_to_onnx(graph, shape_override,
                          const_node_values=const_node_values,
                          ignore_default=ignore_default,
                          use_default=use_default)
|
lib/python3.10/site-packages/tf2onnx/tflite/AbsOptions.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
# automatically generated by the FlatBuffers compiler, do not modify
|
| 4 |
+
|
| 5 |
+
# namespace: tflite
|
| 6 |
+
|
| 7 |
+
import flatbuffers
|
| 8 |
+
from flatbuffers.compat import import_numpy
|
| 9 |
+
np = import_numpy()
|
| 10 |
+
|
| 11 |
+
class AbsOptions(object):
    # FlatBuffers accessor for the tflite AbsOptions table (declares no fields).
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsAbsOptions(cls, buf, offset):
        # Resolve the root table offset and wrap it in an accessor.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = AbsOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def AbsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"\x54\x46\x4C\x33" is ASCII "TFL3", the tflite file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # AbsOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

def AbsOptionsStart(builder): builder.StartObject(0)
def AbsOptionsEnd(builder): return builder.EndObject()
|
lib/python3.10/site-packages/tf2onnx/tflite/ActivationFunctionType.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
# automatically generated by the FlatBuffers compiler, do not modify
|
| 4 |
+
|
| 5 |
+
# namespace: tflite
|
| 6 |
+
|
| 7 |
+
class ActivationFunctionType(object):
    # Generated constants mirroring tflite's ActivationFunctionType enum.
    NONE = 0
    RELU = 1
    RELU_N1_TO_1 = 2
    RELU6 = 3
    TANH = 4
    SIGN_BIT = 5
|
| 14 |
+
|
lib/python3.10/site-packages/tf2onnx/tflite/ArgMinOptions.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
# automatically generated by the FlatBuffers compiler, do not modify
|
| 4 |
+
|
| 5 |
+
# namespace: tflite
|
| 6 |
+
|
| 7 |
+
import flatbuffers
|
| 8 |
+
from flatbuffers.compat import import_numpy
|
| 9 |
+
np = import_numpy()
|
| 10 |
+
|
| 11 |
+
class ArgMinOptions(object):
    # FlatBuffers accessor for the tflite ArgMinOptions table.
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsArgMinOptions(cls, buf, offset):
        # Resolve the root table offset and wrap it in an accessor.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = ArgMinOptions()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def ArgMinOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"\x54\x46\x4C\x33" is ASCII "TFL3", the tflite file identifier.
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # ArgMinOptions
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # ArgMinOptions
    def OutputType(self):
        # int8 field at vtable slot 4; 0 is the schema default when absent.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

def ArgMinOptionsStart(builder): builder.StartObject(1)
def ArgMinOptionsAddOutputType(builder, outputType): builder.PrependInt8Slot(0, outputType, 0)
def ArgMinOptionsEnd(builder): return builder.EndObject()
|
lib/python3.10/site-packages/tf2onnx/tflite/CosOptions.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
# automatically generated by the FlatBuffers compiler, do not modify
|
| 4 |
+
|
| 5 |
+
# namespace: tflite
|
| 6 |
+
|
| 7 |
+
import flatbuffers
|
| 8 |
+
from flatbuffers.compat import import_numpy
|
| 9 |
+
np = import_numpy()
|
| 10 |
+
|
| 11 |
+
class CosOptions(object):
    """Auto-generated flatbuffers read view over a tflite CosOptions table (no fields)."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsCosOptions(cls, buf, offset):
        """Build a CosOptions positioned at the root table of *buf*."""
        root = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        view = CosOptions()
        view.Init(buf, offset + root)
        return view

    @classmethod
    def CosOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        """Report whether *buf* carries the TFLite file identifier."""
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # CosOptions
    def Init(self, buf, pos):
        """Re-point this view at table position *pos* inside *buf*."""
        self._tab = flatbuffers.table.Table(buf, pos)
|
| 28 |
+
|
| 29 |
+
def CosOptionsStart(builder):
    # Begin an empty CosOptions table (no slots).
    builder.StartObject(0)

def CosOptionsEnd(builder):
    # Close the table and return its offset.
    return builder.EndObject()
|
lib/python3.10/site-packages/tf2onnx/tflite/DepthToSpaceOptions.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
# automatically generated by the FlatBuffers compiler, do not modify
|
| 4 |
+
|
| 5 |
+
# namespace: tflite
|
| 6 |
+
|
| 7 |
+
import flatbuffers
|
| 8 |
+
from flatbuffers.compat import import_numpy
|
| 9 |
+
np = import_numpy()
|
| 10 |
+
|
| 11 |
+
class DepthToSpaceOptions(object):
    """Auto-generated flatbuffers read view over a tflite DepthToSpaceOptions table."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsDepthToSpaceOptions(cls, buf, offset):
        """Build a DepthToSpaceOptions positioned at the root table of *buf*."""
        root = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        view = DepthToSpaceOptions()
        view.Init(buf, offset + root)
        return view

    @classmethod
    def DepthToSpaceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        """Report whether *buf* carries the TFLite file identifier."""
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # DepthToSpaceOptions
    def Init(self, buf, pos):
        """Re-point this view at table position *pos* inside *buf*."""
        self._tab = flatbuffers.table.Table(buf, pos)

    # DepthToSpaceOptions
    def BlockSize(self):
        """Return the block_size scalar (int32); 0 when the field is absent."""
        field = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if field == 0:
            return 0
        return self._tab.Get(flatbuffers.number_types.Int32Flags, field + self._tab.Pos)
|
| 35 |
+
|
| 36 |
+
def DepthToSpaceOptionsStart(builder):
    # Begin a DepthToSpaceOptions table with one slot.
    builder.StartObject(1)

def DepthToSpaceOptionsAddBlockSize(builder, blockSize):
    # Slot 0: int32 block_size, default 0.
    builder.PrependInt32Slot(0, blockSize, 0)

def DepthToSpaceOptionsEnd(builder):
    # Close the table and return its offset.
    return builder.EndObject()
|
lib/python3.10/site-packages/tf2onnx/tflite/DepthwiseConv2DOptions.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
# automatically generated by the FlatBuffers compiler, do not modify
|
| 4 |
+
|
| 5 |
+
# namespace: tflite
|
| 6 |
+
|
| 7 |
+
import flatbuffers
|
| 8 |
+
from flatbuffers.compat import import_numpy
|
| 9 |
+
np = import_numpy()
|
| 10 |
+
|
| 11 |
+
class DepthwiseConv2DOptions(object):
    """Auto-generated flatbuffers read view over a tflite DepthwiseConv2DOptions table."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsDepthwiseConv2DOptions(cls, buf, offset):
        """Build a DepthwiseConv2DOptions positioned at the root table of *buf*."""
        root = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        view = DepthwiseConv2DOptions()
        view.Init(buf, offset + root)
        return view

    @classmethod
    def DepthwiseConv2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        """Report whether *buf* carries the TFLite file identifier."""
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # DepthwiseConv2DOptions
    def Init(self, buf, pos):
        """Re-point this view at table position *pos* inside *buf*."""
        self._tab = flatbuffers.table.Table(buf, pos)

    # DepthwiseConv2DOptions
    def _ScalarField(self, voffset, flags, default):
        """Fetch the scalar slot at *voffset*, or *default* when unset."""
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(voffset))
        if o == 0:
            return default
        return self._tab.Get(flags, o + self._tab.Pos)

    # DepthwiseConv2DOptions
    def Padding(self):
        """Return the padding enum (int8); default 0."""
        return self._ScalarField(4, flatbuffers.number_types.Int8Flags, 0)

    # DepthwiseConv2DOptions
    def StrideW(self):
        """Return stride_w (int32); default 0."""
        return self._ScalarField(6, flatbuffers.number_types.Int32Flags, 0)

    # DepthwiseConv2DOptions
    def StrideH(self):
        """Return stride_h (int32); default 0."""
        return self._ScalarField(8, flatbuffers.number_types.Int32Flags, 0)

    # DepthwiseConv2DOptions
    def DepthMultiplier(self):
        """Return depth_multiplier (int32); default 0."""
        return self._ScalarField(10, flatbuffers.number_types.Int32Flags, 0)

    # DepthwiseConv2DOptions
    def FusedActivationFunction(self):
        """Return the fused activation enum (int8); default 0."""
        return self._ScalarField(12, flatbuffers.number_types.Int8Flags, 0)

    # DepthwiseConv2DOptions
    def DilationWFactor(self):
        """Return dilation_w_factor (int32); default 1."""
        return self._ScalarField(14, flatbuffers.number_types.Int32Flags, 1)

    # DepthwiseConv2DOptions
    def DilationHFactor(self):
        """Return dilation_h_factor (int32); default 1."""
        return self._ScalarField(16, flatbuffers.number_types.Int32Flags, 1)
|
| 77 |
+
|
| 78 |
+
def DepthwiseConv2DOptionsStart(builder):
    # Begin a DepthwiseConv2DOptions table with seven slots.
    builder.StartObject(7)

def DepthwiseConv2DOptionsAddPadding(builder, padding):
    # Slot 0: int8 padding enum, default 0.
    builder.PrependInt8Slot(0, padding, 0)

def DepthwiseConv2DOptionsAddStrideW(builder, strideW):
    # Slot 1: int32 stride_w, default 0.
    builder.PrependInt32Slot(1, strideW, 0)

def DepthwiseConv2DOptionsAddStrideH(builder, strideH):
    # Slot 2: int32 stride_h, default 0.
    builder.PrependInt32Slot(2, strideH, 0)

def DepthwiseConv2DOptionsAddDepthMultiplier(builder, depthMultiplier):
    # Slot 3: int32 depth_multiplier, default 0.
    builder.PrependInt32Slot(3, depthMultiplier, 0)

def DepthwiseConv2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction):
    # Slot 4: int8 fused activation enum, default 0.
    builder.PrependInt8Slot(4, fusedActivationFunction, 0)

def DepthwiseConv2DOptionsAddDilationWFactor(builder, dilationWFactor):
    # Slot 5: int32 dilation_w_factor, default 1.
    builder.PrependInt32Slot(5, dilationWFactor, 1)

def DepthwiseConv2DOptionsAddDilationHFactor(builder, dilationHFactor):
    # Slot 6: int32 dilation_h_factor, default 1.
    builder.PrependInt32Slot(6, dilationHFactor, 1)

def DepthwiseConv2DOptionsEnd(builder):
    # Close the table and return its offset.
    return builder.EndObject()
|
lib/python3.10/site-packages/tf2onnx/tflite/FillOptions.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
# automatically generated by the FlatBuffers compiler, do not modify
|
| 4 |
+
|
| 5 |
+
# namespace: tflite
|
| 6 |
+
|
| 7 |
+
import flatbuffers
|
| 8 |
+
from flatbuffers.compat import import_numpy
|
| 9 |
+
np = import_numpy()
|
| 10 |
+
|
| 11 |
+
class FillOptions(object):
    """Auto-generated flatbuffers read view over a tflite FillOptions table (no fields)."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsFillOptions(cls, buf, offset):
        """Build a FillOptions positioned at the root table of *buf*."""
        root = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        view = FillOptions()
        view.Init(buf, offset + root)
        return view

    @classmethod
    def FillOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        """Report whether *buf* carries the TFLite file identifier."""
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # FillOptions
    def Init(self, buf, pos):
        """Re-point this view at table position *pos* inside *buf*."""
        self._tab = flatbuffers.table.Table(buf, pos)
|
| 28 |
+
|
| 29 |
+
def FillOptionsStart(builder):
    # Begin an empty FillOptions table (no slots).
    builder.StartObject(0)

def FillOptionsEnd(builder):
    # Close the table and return its offset.
    return builder.EndObject()
|
lib/python3.10/site-packages/tf2onnx/tflite/GatherOptions.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
# automatically generated by the FlatBuffers compiler, do not modify
|
| 4 |
+
|
| 5 |
+
# namespace: tflite
|
| 6 |
+
|
| 7 |
+
import flatbuffers
|
| 8 |
+
from flatbuffers.compat import import_numpy
|
| 9 |
+
np = import_numpy()
|
| 10 |
+
|
| 11 |
+
class GatherOptions(object):
    """Auto-generated flatbuffers read view over a tflite GatherOptions table."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsGatherOptions(cls, buf, offset):
        """Build a GatherOptions positioned at the root table of *buf*."""
        root = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        view = GatherOptions()
        view.Init(buf, offset + root)
        return view

    @classmethod
    def GatherOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        """Report whether *buf* carries the TFLite file identifier."""
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # GatherOptions
    def Init(self, buf, pos):
        """Re-point this view at table position *pos* inside *buf*."""
        self._tab = flatbuffers.table.Table(buf, pos)

    # GatherOptions
    def Axis(self):
        """Return the axis scalar (int32); 0 when the field is absent."""
        field = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if field == 0:
            return 0
        return self._tab.Get(flatbuffers.number_types.Int32Flags, field + self._tab.Pos)
|
| 35 |
+
|
| 36 |
+
def GatherOptionsStart(builder):
    # Begin a GatherOptions table with one slot.
    builder.StartObject(1)

def GatherOptionsAddAxis(builder, axis):
    # Slot 0: int32 axis, default 0.
    builder.PrependInt32Slot(0, axis, 0)

def GatherOptionsEnd(builder):
    # Close the table and return its offset.
    return builder.EndObject()
|
lib/python3.10/site-packages/tf2onnx/tflite/GreaterOptions.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SPDX-License-Identifier: Apache-2.0
|
| 2 |
+
|
| 3 |
+
# automatically generated by the FlatBuffers compiler, do not modify
|
| 4 |
+
|
| 5 |
+
# namespace: tflite
|
| 6 |
+
|
| 7 |
+
import flatbuffers
|
| 8 |
+
from flatbuffers.compat import import_numpy
|
| 9 |
+
np = import_numpy()
|
| 10 |
+
|
| 11 |
+
class GreaterOptions(object):
    """Auto-generated flatbuffers read view over a tflite GreaterOptions table (no fields)."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAsGreaterOptions(cls, buf, offset):
        """Build a GreaterOptions positioned at the root table of *buf*."""
        root = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        view = GreaterOptions()
        view.Init(buf, offset + root)
        return view

    @classmethod
    def GreaterOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        """Report whether *buf* carries the TFLite file identifier."""
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)

    # GreaterOptions
    def Init(self, buf, pos):
        """Re-point this view at table position *pos* inside *buf*."""
        self._tab = flatbuffers.table.Table(buf, pos)
|
| 28 |
+
|
| 29 |
+
def GreaterOptionsStart(builder):
    # Begin an empty GreaterOptions table (no slots).
    builder.StartObject(0)

def GreaterOptionsEnd(builder):
    # Close the table and return its offset.
    return builder.EndObject()
|